diff --git a/.github/workflows/dco.yaml b/.github/workflows/dco.yaml index 599d0d1a025..d7a8450d199 100644 --- a/.github/workflows/dco.yaml +++ b/.github/workflows/dco.yaml @@ -12,7 +12,7 @@ jobs: - name: Checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Set up Python 3.x - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: '3.x' - name: Check DCO diff --git a/.github/workflows/nightly_build.yaml b/.github/workflows/nightly_build.yaml index 12e41eecd75..0a45d43ca97 100644 --- a/.github/workflows/nightly_build.yaml +++ b/.github/workflows/nightly_build.yaml @@ -21,7 +21,7 @@ jobs: - name: Checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install cosign - uses: sigstore/cosign-installer@59acb6260d9c0ba8f4a2f9d9b48431a222b68e20 # v3.5.0 + uses: sigstore/cosign-installer@4959ce089c160fddf62f7b42464195ba1a56d382 # v3.6.0 with: cosign-release: v2.2.3 - name: Install regctl diff --git a/.github/workflows/pr_build.yaml b/.github/workflows/pr_build.yaml index d03d41c9922..07c1afae6ea 100644 --- a/.github/workflows/pr_build.yaml +++ b/.github/workflows/pr_build.yaml @@ -146,7 +146,7 @@ jobs: - name: Build artifacts run: ./.github/workflows/scripts/build_artifacts.sh ${{ runner.os }} - name: Archive artifacts - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4 + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4 with: name: binaries-linux path: ./artifacts/ @@ -186,7 +186,7 @@ jobs: - name: Export images run: tar -czvf images.tar.gz *-image.tar - name: Archive images - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4 + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4 with: name: images path: images.tar.gz @@ -215,7 +215,7 @@ jobs: docker save 
spire-server-windows:latest-local spire-agent-windows:latest-local oidc-discovery-provider-windows:latest-local -o images-windows.tar gzip images-windows.tar - name: Archive images - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4 + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4 with: name: images-windows path: images-windows.tar.gz @@ -396,7 +396,7 @@ jobs: path: .build key: ${{ runner.os }}-tools-${{ hashFiles('.go-version','Makefile') }} - name: Install msys2 - uses: msys2/setup-msys2@5df0ca6cbf14efcd08f8d5bd5e049a3cc8e07fd2 # v2.24.0 + uses: msys2/setup-msys2@ddf331adaebd714795f1042345e6ca57bd66cea8 # v2.24.1 with: msystem: MINGW64 update: true @@ -479,7 +479,7 @@ jobs: path: .build key: ${{ runner.os }}-tools-${{ hashFiles('.go-version','Makefile') }} - name: Install msys2 - uses: msys2/setup-msys2@5df0ca6cbf14efcd08f8d5bd5e049a3cc8e07fd2 # v2.24.0 + uses: msys2/setup-msys2@ddf331adaebd714795f1042345e6ca57bd66cea8 # v2.24.1 with: msystem: MINGW64 update: true @@ -523,7 +523,7 @@ jobs: path: ~/go/pkg/mod key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - name: Install msys2 - uses: msys2/setup-msys2@5df0ca6cbf14efcd08f8d5bd5e049a3cc8e07fd2 # v2.24.0 + uses: msys2/setup-msys2@ddf331adaebd714795f1042345e6ca57bd66cea8 # v2.24.1 with: msystem: MINGW64 update: true @@ -568,7 +568,7 @@ jobs: path: .build key: ${{ runner.os }}-tools-${{ hashFiles('.go-version','Makefile') }} - name: Install msys2 - uses: msys2/setup-msys2@5df0ca6cbf14efcd08f8d5bd5e049a3cc8e07fd2 # v2.24.0 + uses: msys2/setup-msys2@ddf331adaebd714795f1042345e6ca57bd66cea8 # v2.24.1 with: msystem: MINGW64 update: true @@ -584,7 +584,7 @@ jobs: - name: Build artifacts run: ./.github/workflows/scripts/build_artifacts.sh ${{ runner.os }} - name: Archive artifacts - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4 + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4 with: name: binaries-windows 
path: ./artifacts/ diff --git a/.github/workflows/release_build.yaml b/.github/workflows/release_build.yaml index 9197284d900..00d4d0ec7b2 100644 --- a/.github/workflows/release_build.yaml +++ b/.github/workflows/release_build.yaml @@ -137,7 +137,7 @@ jobs: - name: Build artifacts run: ./.github/workflows/scripts/build_artifacts.sh ${{ runner.os }} - name: Archive artifacts - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4 + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4 with: name: binaries-linux path: ./artifacts/ @@ -172,7 +172,7 @@ jobs: - name: Export images run: tar -czvf images.tar.gz *-image.tar - name: Archive images - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4 + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4 with: name: images path: images.tar.gz @@ -200,7 +200,7 @@ jobs: docker save spire-server-windows:latest-local spire-agent-windows:latest-local oidc-discovery-provider-windows:latest-local -o images-windows.tar gzip images-windows.tar - name: Archive images - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4 + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4 with: name: images-windows path: images-windows.tar.gz @@ -380,7 +380,7 @@ jobs: path: .build key: ${{ runner.os }}-tools-${{ hashFiles('.go-version','Makefile') }} - name: Install msys2 - uses: msys2/setup-msys2@5df0ca6cbf14efcd08f8d5bd5e049a3cc8e07fd2 # v2.24.0 + uses: msys2/setup-msys2@ddf331adaebd714795f1042345e6ca57bd66cea8 # v2.24.1 with: msystem: MINGW64 update: true @@ -449,7 +449,7 @@ jobs: path: .build key: ${{ runner.os }}-tools-${{ hashFiles('.go-version','Makefile') }} - name: Install msys2 - uses: msys2/setup-msys2@5df0ca6cbf14efcd08f8d5bd5e049a3cc8e07fd2 # v2.24.0 + uses: msys2/setup-msys2@ddf331adaebd714795f1042345e6ca57bd66cea8 # v2.24.1 with: msystem: MINGW64 update: true @@ -486,7 +486,7 @@ jobs: path: 
~/go/pkg/mod key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - name: Install msys2 - uses: msys2/setup-msys2@5df0ca6cbf14efcd08f8d5bd5e049a3cc8e07fd2 # v2.24.0 + uses: msys2/setup-msys2@ddf331adaebd714795f1042345e6ca57bd66cea8 # v2.24.1 with: msystem: MINGW64 update: true @@ -524,7 +524,7 @@ jobs: path: .build key: ${{ runner.os }}-tools-${{ hashFiles('.go-version','Makefile') }} - name: Install msys2 - uses: msys2/setup-msys2@5df0ca6cbf14efcd08f8d5bd5e049a3cc8e07fd2 # v2.24.0 + uses: msys2/setup-msys2@ddf331adaebd714795f1042345e6ca57bd66cea8 # v2.24.1 with: msystem: MINGW64 update: true @@ -540,7 +540,7 @@ jobs: path: ./bin/ key: ${{ runner.os }}-executables-${{ hashFiles('**/*.exe') }} - name: Archive artifacts - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4 + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4 with: name: binaries-windows path: ./artifacts/ @@ -589,7 +589,7 @@ jobs: - name: Checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install cosign - uses: sigstore/cosign-installer@59acb6260d9c0ba8f4a2f9d9b48431a222b68e20 # v3.5.0 + uses: sigstore/cosign-installer@4959ce089c160fddf62f7b42464195ba1a56d382 # v3.6.0 with: cosign-release: v2.2.3 - name: Install regctl diff --git a/.go-version b/.go-version index 89144dbc38f..a6c2798a482 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.22.3 +1.23.0 diff --git a/.golangci.yml b/.golangci.yml index d8e2b63d8ee..9306540caff 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -2,11 +2,11 @@ run: # timeout for analysis, e.g. 
30s, 5m, default is 1m timeout: 12m - skip-dirs: +issues: + exclude-dirs: - testdata$ - test/mock - - skip-files: + exclude-files: - ".*\\.pb\\.go" linters: @@ -29,3 +29,6 @@ linters-settings: revive: # minimal confidence for issues, default is 0.8 confidence: 0.0 + rules: + - name: unused-parameter + disabled: true diff --git a/.spire-tool-versions b/.spire-tool-versions index d39496afeee..97d5b48257a 100644 --- a/.spire-tool-versions +++ b/.spire-tool-versions @@ -1,3 +1,3 @@ -golangci_lint v1.55.0 +golangci_lint v1.60.1 markdown_lint v0.37.0 protoc 24.4 diff --git a/CHANGELOG.md b/CHANGELOG.md index b58e2886fcc..1c7daabd116 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,37 @@ # Changelog +## [1.10.4] - 2024-09-12 + +### Fixed + +- Add missing commits to spire-plugin-sdk and spire-api-sdk releases (spiffe/spire-api-sdk#66, spiffe/spire-plugin-sdk#39) + +## [1.10.3] - 2024-09-03 + +### Fixed + +- Regression in agent health check, requiring the agent to have an SVID on disk to be healthy (#5459) + +## [1.10.2] - 2024-09-03 + +### Added + +- `http_challenge` NodeAttestor plugin (#4909) +- Experimental support for validating container image signatures through Sigstore selectors in the docker Workload Attestor (#5272) +- Metrics for monitoring the event-based cache (#5411) + +### Changed + +- Delegated Identity API to allow subscription by process ID (#5272) +- Agent Debug endpoint to count SVIDs by type (#5352) +- Agent health check to report an unhealthy status until the Agent SVID is attested (#5298) +- Small documentation improvements (#5393) + +### Fixed + +- `aws_iid` NodeAttestor to properly handle multiple network interfaces (#5300) +- Server configuration to correctly propagate the `sql_transaction_timeout` setting in the experimental events-based cache (#5345) + ## [1.10.1] - 2024-08-01 ### Added diff --git a/Dockerfile b/Dockerfile index 38abadf4fcd..0b40d0bb20a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ # Build stage ARG goversion # 
Use alpine3.18 until go-sqlite works in 3.19 -FROM --platform=${BUILDPLATFORM} golang:${goversion}-alpine3.18 as base +FROM --platform=${BUILDPLATFORM} golang:${goversion}-alpine3.20 as base WORKDIR /spire RUN apk --no-cache --update add file bash clang lld pkgconfig git make COPY go.* ./ @@ -15,7 +15,7 @@ COPY . . # when bumping to a new version analyze the new version for security issues # then use crane to lookup the digest of that version so we are immutable # crane digest tonistiigi/xx:1.3.0 -FROM --platform=$BUILDPLATFORM tonistiigi/xx@sha256:904fe94f236d36d65aeb5a2462f88f2c537b8360475f6342e7599194f291fb7e AS xx +FROM --platform=$BUILDPLATFORM tonistiigi/xx:1.5.0@sha256:0c6a569797744e45955f39d4f7538ac344bfb7ebf0a54006a0a4297b153ccf0f AS xx FROM --platform=${BUILDPLATFORM} base as builder ARG TAG diff --git a/RELEASING.md b/RELEASING.md index 6167821c3fd..e404891f957 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -1,6 +1,6 @@ # Release and Branch Management -The SPIRE project maintains active support for both the current and the previous major versions. All active development occurs in the `main` branch. Version branches are used for minor releases of the previous major version when necessary. +The SPIRE project maintains active support for both the current and the previous minor versions. All active development occurs in the `main` branch. Version branches are used for patch releases of the previous minor version when necessary. 
## Version Branches diff --git a/cmd/spire-server/cli/authoritycommon/authoritycommon.go b/cmd/spire-server/cli/authoritycommon/authoritycommon.go new file mode 100644 index 00000000000..83c16e85db2 --- /dev/null +++ b/cmd/spire-server/cli/authoritycommon/authoritycommon.go @@ -0,0 +1,170 @@ +package authoritycommon_test + +import ( + "bytes" + "context" + "testing" + + "github.com/mitchellh/cli" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + "github.com/spiffe/spire/cmd/spire-server/cli/common" + commoncli "github.com/spiffe/spire/pkg/common/cli" + "github.com/spiffe/spire/test/spiretest" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +var ( + AvailableFormats = []string{"pretty", "json"} +) + +type localAuthorityTest struct { + Stdin *bytes.Buffer + Stdout *bytes.Buffer + Stderr *bytes.Buffer + Args []string + Server *fakeLocalAuthorityServer + Client cli.Command +} + +func (s *localAuthorityTest) afterTest(t *testing.T) { + t.Logf("TEST:%s", t.Name()) + t.Logf("STDOUT:\n%s", s.Stdout.String()) + t.Logf("STDIN:\n%s", s.Stdin.String()) + t.Logf("STDERR:\n%s", s.Stderr.String()) +} + +func SetupTest(t *testing.T, newClient func(*commoncli.Env) cli.Command) *localAuthorityTest { + server := &fakeLocalAuthorityServer{} + + addr := spiretest.StartGRPCServer(t, func(s *grpc.Server) { + localauthorityv1.RegisterLocalAuthorityServer(s, server) + }) + + stdin := new(bytes.Buffer) + stdout := new(bytes.Buffer) + stderr := new(bytes.Buffer) + + client := newClient(&commoncli.Env{ + Stdin: stdin, + Stdout: stdout, + Stderr: stderr, + }) + + test := &localAuthorityTest{ + Stdin: stdin, + Stdout: stdout, + Stderr: stderr, + Args: []string{common.AddrArg, common.GetAddr(addr)}, + Server: server, + Client: client, + } + + t.Cleanup(func() { + test.afterTest(t) + }) + + return test +} + +type fakeLocalAuthorityServer struct { + localauthorityv1.UnsafeLocalAuthorityServer + + ActiveJWT, + PreparedJWT, + 
OldJWT, + ActiveX509, + PreparedX509, + OldX509, + TaintedX509, + RevokedX509, + TaintedJWT, + RevokedJWT *localauthorityv1.AuthorityState + + Err error +} + +func (s *fakeLocalAuthorityServer) GetJWTAuthorityState(context.Context, *localauthorityv1.GetJWTAuthorityStateRequest) (*localauthorityv1.GetJWTAuthorityStateResponse, error) { + return &localauthorityv1.GetJWTAuthorityStateResponse{ + Active: s.ActiveJWT, + Prepared: s.PreparedJWT, + Old: s.OldJWT, + }, s.Err +} + +func (s *fakeLocalAuthorityServer) PrepareJWTAuthority(context.Context, *localauthorityv1.PrepareJWTAuthorityRequest) (*localauthorityv1.PrepareJWTAuthorityResponse, error) { + return &localauthorityv1.PrepareJWTAuthorityResponse{ + PreparedAuthority: s.PreparedJWT, + }, s.Err +} + +func (s *fakeLocalAuthorityServer) ActivateJWTAuthority(context.Context, *localauthorityv1.ActivateJWTAuthorityRequest) (*localauthorityv1.ActivateJWTAuthorityResponse, error) { + return &localauthorityv1.ActivateJWTAuthorityResponse{ + ActivatedAuthority: s.ActiveJWT, + }, s.Err +} + +func (s *fakeLocalAuthorityServer) TaintJWTAuthority(context.Context, *localauthorityv1.TaintJWTAuthorityRequest) (*localauthorityv1.TaintJWTAuthorityResponse, error) { + return &localauthorityv1.TaintJWTAuthorityResponse{ + TaintedAuthority: s.TaintedJWT, + }, s.Err +} + +func (s *fakeLocalAuthorityServer) RevokeJWTAuthority(context.Context, *localauthorityv1.RevokeJWTAuthorityRequest) (*localauthorityv1.RevokeJWTAuthorityResponse, error) { + return &localauthorityv1.RevokeJWTAuthorityResponse{ + RevokedAuthority: s.RevokedJWT, + }, s.Err +} + +func (s *fakeLocalAuthorityServer) GetX509AuthorityState(context.Context, *localauthorityv1.GetX509AuthorityStateRequest) (*localauthorityv1.GetX509AuthorityStateResponse, error) { + return &localauthorityv1.GetX509AuthorityStateResponse{ + Active: s.ActiveX509, + Prepared: s.PreparedX509, + Old: s.OldX509, + }, s.Err +} + +func (s *fakeLocalAuthorityServer) PrepareX509Authority(context.Context, 
*localauthorityv1.PrepareX509AuthorityRequest) (*localauthorityv1.PrepareX509AuthorityResponse, error) { + return &localauthorityv1.PrepareX509AuthorityResponse{ + PreparedAuthority: s.PreparedX509, + }, s.Err +} + +func (s *fakeLocalAuthorityServer) ActivateX509Authority(context.Context, *localauthorityv1.ActivateX509AuthorityRequest) (*localauthorityv1.ActivateX509AuthorityResponse, error) { + return &localauthorityv1.ActivateX509AuthorityResponse{ + ActivatedAuthority: s.ActiveX509, + }, s.Err +} + +func (s *fakeLocalAuthorityServer) TaintX509Authority(context.Context, *localauthorityv1.TaintX509AuthorityRequest) (*localauthorityv1.TaintX509AuthorityResponse, error) { + return &localauthorityv1.TaintX509AuthorityResponse{ + TaintedAuthority: s.TaintedX509, + }, s.Err +} + +func (s *fakeLocalAuthorityServer) TaintX509UpstreamAuthority(context.Context, *localauthorityv1.TaintX509UpstreamAuthorityRequest) (*localauthorityv1.TaintX509UpstreamAuthorityResponse, error) { + return &localauthorityv1.TaintX509UpstreamAuthorityResponse{}, s.Err +} + +func (s *fakeLocalAuthorityServer) RevokeX509Authority(context.Context, *localauthorityv1.RevokeX509AuthorityRequest) (*localauthorityv1.RevokeX509AuthorityResponse, error) { + return &localauthorityv1.RevokeX509AuthorityResponse{ + RevokedAuthority: s.RevokedX509, + }, s.Err +} + +func (s *fakeLocalAuthorityServer) RevokeX509UpstreamAuthority(context.Context, *localauthorityv1.RevokeX509UpstreamAuthorityRequest) (*localauthorityv1.RevokeX509UpstreamAuthorityResponse, error) { + return &localauthorityv1.RevokeX509UpstreamAuthorityResponse{}, s.Err +} + +func RequireOutputBasedOnFormat(t *testing.T, format, stdoutString string, expectedStdoutPretty, expectedStdoutJSON string) { + switch format { + case "pretty": + require.Contains(t, stdoutString, expectedStdoutPretty) + case "json": + if expectedStdoutJSON != "" { + require.JSONEq(t, expectedStdoutJSON, stdoutString) + } else { + require.Empty(t, stdoutString) + } + } +} diff 
--git a/cmd/spire-server/cli/cli.go b/cmd/spire-server/cli/cli.go index 1ce382cf714..84ea882296b 100644 --- a/cmd/spire-server/cli/cli.go +++ b/cmd/spire-server/cli/cli.go @@ -3,6 +3,8 @@ package cli import ( "context" stdlog "log" + "os" + "strings" "github.com/mitchellh/cli" "github.com/spiffe/spire/cmd/spire-server/cli/agent" @@ -11,11 +13,14 @@ import ( "github.com/spiffe/spire/cmd/spire-server/cli/federation" "github.com/spiffe/spire/cmd/spire-server/cli/healthcheck" "github.com/spiffe/spire/cmd/spire-server/cli/jwt" + localauthority_jwt "github.com/spiffe/spire/cmd/spire-server/cli/localauthority/jwt" + localauthority_x509 "github.com/spiffe/spire/cmd/spire-server/cli/localauthority/x509" "github.com/spiffe/spire/cmd/spire-server/cli/logger" "github.com/spiffe/spire/cmd/spire-server/cli/run" "github.com/spiffe/spire/cmd/spire-server/cli/token" "github.com/spiffe/spire/cmd/spire-server/cli/validate" "github.com/spiffe/spire/cmd/spire-server/cli/x509" + "github.com/spiffe/spire/pkg/common/fflag" "github.com/spiffe/spire/pkg/common/log" "github.com/spiffe/spire/pkg/common/version" ) @@ -126,9 +131,67 @@ func (cc *CLI) Run(ctx context.Context, args []string) int { }, } + // TODO: Remove this when the forced_rotation feature flag is no longer + // needed. Refer to https://github.com/spiffe/spire/issues/5398. + addCommandsEnabledByFFlags(c.Commands) + exitStatus, err := c.Run() if err != nil { stdlog.Println(err) } return exitStatus } + +// addCommandsEnabledByFFlags adds commands that are currently available only +// through a feature flag. +// Feature flags support through the fflag package in SPIRE Server is +// designed to work only with the run command and the config file. +// Since feature flags are intended to be used by developers of a specific +// feature only, exposing them through command line arguments is not +// convenient. 
Instead, we use the SPIRE_SERVER_FFLAGS environment variable +// to read the configured SPIRE Server feature flags from the environment +// when other commands may be enabled through feature flags. +func addCommandsEnabledByFFlags(commands map[string]cli.CommandFactory) { + fflagsEnv := os.Getenv("SPIRE_SERVER_FFLAGS") + fflags := strings.Split(fflagsEnv, " ") + flagForcedRotationFound := false + for _, ff := range fflags { + if ff == string(fflag.FlagForcedRotation) { + flagForcedRotationFound = true + break + } + } + + if flagForcedRotationFound { + commands["localauthority x509 show"] = func() (cli.Command, error) { + return localauthority_x509.NewX509ShowCommand(), nil + } + commands["localauthority x509 prepare"] = func() (cli.Command, error) { + return localauthority_x509.NewX509PrepareCommand(), nil + } + commands["localauthority x509 activate"] = func() (cli.Command, error) { + return localauthority_x509.NewX509ActivateCommand(), nil + } + commands["localauthority x509 taint"] = func() (cli.Command, error) { + return localauthority_x509.NewX509TaintCommand(), nil + } + commands["localauthority x509 revoke"] = func() (cli.Command, error) { + return localauthority_x509.NewX509RevokeCommand(), nil + } + commands["localauthority jwt show"] = func() (cli.Command, error) { + return localauthority_jwt.NewJWTShowCommand(), nil + } + commands["localauthority jwt prepare"] = func() (cli.Command, error) { + return localauthority_jwt.NewJWTPrepareCommand(), nil + } + commands["localauthority jwt activate"] = func() (cli.Command, error) { + return localauthority_jwt.NewJWTActivateCommand(), nil + } + commands["localauthority jwt taint"] = func() (cli.Command, error) { + return localauthority_jwt.NewJWTTaintCommand(), nil + } + commands["localauthority jwt revoke"] = func() (cli.Command, error) { + return localauthority_jwt.NewJWTRevokeCommand(), nil + } + } +} diff --git a/cmd/spire-server/cli/entry/create.go b/cmd/spire-server/cli/entry/create.go index 
31cc157991e..7de2c66aa1f 100644 --- a/cmd/spire-server/cli/entry/create.go +++ b/cmd/spire-server/cli/entry/create.go @@ -45,11 +45,6 @@ type createCommand struct { // Entry hint, used to disambiguate entries with the same SPIFFE ID hint string - // TTL for x509 and JWT SVIDs issued to this workload, unless type specific TTLs are set. - // This field is deprecated in favor of the x509SVIDTTL and jwtSVIDTTL fields and will be - // removed in a future release. - ttl int - // TTL for x509 SVIDs issued to this workload x509SVIDTTL int @@ -94,9 +89,8 @@ func (c *createCommand) AppendFlags(f *flag.FlagSet) { f.StringVar(&c.entryID, "entryID", "", "A custom ID for this registration entry (optional). If not set, a new entry ID will be generated") f.StringVar(&c.parentID, "parentID", "", "The SPIFFE ID of this record's parent") f.StringVar(&c.spiffeID, "spiffeID", "", "The SPIFFE ID that this record represents") - f.IntVar(&c.ttl, "ttl", 0, "The lifetime, in seconds, for SVIDs issued based on this registration entry. This flag is deprecated in favor of x509SVIDTTL and jwtSVIDTTL and will be removed in a future version") - f.IntVar(&c.x509SVIDTTL, "x509SVIDTTL", 0, "The lifetime, in seconds, for x509-SVIDs issued based on this registration entry. Overrides ttl flag") - f.IntVar(&c.jwtSVIDTTL, "jwtSVIDTTL", 0, "The lifetime, in seconds, for JWT-SVIDs issued based on this registration entry. Overrides ttl flag") + f.IntVar(&c.x509SVIDTTL, "x509SVIDTTL", 0, "The lifetime, in seconds, for x509-SVIDs issued based on this registration entry.") + f.IntVar(&c.jwtSVIDTTL, "jwtSVIDTTL", 0, "The lifetime, in seconds, for JWT-SVIDs issued based on this registration entry.") f.StringVar(&c.path, "data", "", "Path to a file containing registration JSON (optional). If set to '-', read the JSON from stdin.") f.Var(&c.selectors, "selector", "A colon-delimited type:value selector. 
Can be used more than once") f.Var(&c.federatesWith, "federatesWith", "SPIFFE ID of a trust domain to federate with. Can be used more than once") @@ -158,10 +152,6 @@ func (c *createCommand) validate() (err error) { return errors.New("a SPIFFE ID is required") } - if c.ttl < 0 { - return errors.New("a positive TTL is required") - } - if c.x509SVIDTTL < 0 { return errors.New("a positive x509-SVID TTL is required") } @@ -170,10 +160,6 @@ func (c *createCommand) validate() (err error) { return errors.New("a positive JWT-SVID TTL is required") } - if c.ttl > 0 && (c.x509SVIDTTL > 0 || c.jwtSVIDTTL > 0) { - return errors.New("use x509SVIDTTL and jwtSVIDTTL flags or the deprecated ttl flag") - } - return nil } @@ -202,18 +188,6 @@ func (c *createCommand) parseConfig() ([]*types.Entry, error) { Hint: c.hint, } - // c.ttl is deprecated but usable if the new c.x509Svid field is not used. - // c.ttl should not be used to set the jwtSVIDTTL value because the previous - // behavior was to have a hard-coded 5 minute JWT TTL no matter what the value - // of ttl was set to. - // validate(...) ensures that either the new fields or the deprecated field is - // used, but never a mixture. 
- // - // https://github.com/spiffe/spire/issues/2700 - if e.X509SvidTtl == 0 { - e.X509SvidTtl = int32(c.ttl) - } - selectors := []*types.Selector{} for _, s := range c.selectors { cs, err := util.ParseSelector(s) diff --git a/cmd/spire-server/cli/entry/create_test.go b/cmd/spire-server/cli/entry/create_test.go index 98009951b11..eef1f73f00b 100644 --- a/cmd/spire-server/cli/entry/create_test.go +++ b/cmd/spire-server/cli/entry/create_test.go @@ -54,7 +54,7 @@ func TestCreate(t *testing.T) { }, } - fakeRespOKFromCmd2 := &entryv1.BatchCreateEntryResponse{ + fakeRespOKFromCmdWithoutJwtTtl := &entryv1.BatchCreateEntryResponse{ Results: []*entryv1.BatchCreateEntryResponse_Result{ { Entry: &types.Entry{ @@ -186,28 +186,16 @@ func TestCreate(t *testing.T) { expErrJSON: "Error: selector \"unix\" must be formatted as type:value\n", }, { - name: "Negative TTL", - args: []string{"-selector", "unix", "-parentID", "spiffe://example.org/parent", "-spiffeID", "spiffe://example.org/workload", "-ttl", "-10"}, - expErrPretty: "Error: a positive TTL is required\n", - expErrJSON: "Error: a positive TTL is required\n", + name: "Negative X509SvidTtl", + args: []string{"-selector", "unix", "-parentID", "spiffe://example.org/parent", "-spiffeID", "spiffe://example.org/workload", "-x509SVIDTTL", "-10"}, + expErrPretty: "Error: a positive x509-SVID TTL is required\n", + expErrJSON: "Error: a positive x509-SVID TTL is required\n", }, { - name: "Invalid TTL and X509SvidTtl", - args: []string{"-selector", "unix", "-parentID", "spiffe://example.org/parent", "-spiffeID", "spiffe://example.org/workload", "-ttl", "10", "-x509SVIDTTL", "20"}, - expErrPretty: "Error: use x509SVIDTTL and jwtSVIDTTL flags or the deprecated ttl flag\n", - expErrJSON: "Error: use x509SVIDTTL and jwtSVIDTTL flags or the deprecated ttl flag\n", - }, - { - name: "Invalid TTL and JwtSvidTtl", - args: []string{"-selector", "unix", "-parentID", "spiffe://example.org/parent", "-spiffeID", "spiffe://example.org/workload", 
"-ttl", "10", "-jwtSVIDTTL", "20"}, - expErrPretty: "Error: use x509SVIDTTL and jwtSVIDTTL flags or the deprecated ttl flag\n", - expErrJSON: "Error: use x509SVIDTTL and jwtSVIDTTL flags or the deprecated ttl flag\n", - }, - { - name: "Invalid TTL and both X509SvidTtl and JwtSvidTtl", - args: []string{"-selector", "unix", "-parentID", "spiffe://example.org/parent", "-spiffeID", "spiffe://example.org/workload", "-ttl", "10", "-x509SVIDTTL", "20", "-jwtSVIDTTL", "30"}, - expErrPretty: "Error: use x509SVIDTTL and jwtSVIDTTL flags or the deprecated ttl flag\n", - expErrJSON: "Error: use x509SVIDTTL and jwtSVIDTTL flags or the deprecated ttl flag\n", + name: "Negative jwtSVIDTTL", + args: []string{"-selector", "unix", "-parentID", "spiffe://example.org/parent", "-spiffeID", "spiffe://example.org/workload", "-jwtSVIDTTL", "-10"}, + expErrPretty: "Error: a positive JWT-SVID TTL is required\n", + expErrJSON: "Error: a positive JWT-SVID TTL is required\n", }, { name: "Federated node entries", @@ -346,7 +334,7 @@ StoreSvid : true "-parentID", "spiffe://example.org/parent", "-selector", "zebra:zebra:2000", "-selector", "alpha:alpha:2000", - "-ttl", "60", + "-x509SVIDTTL", "60", "-federatesWith", "spiffe://domaina.test", "-federatesWith", "spiffe://domainb.test", "-admin", @@ -376,111 +364,7 @@ StoreSvid : true }, }, }, - fakeResp: fakeRespOKFromCmd2, - expOutPretty: fmt.Sprintf(`Entry ID : entry-id -SPIFFE ID : spiffe://example.org/workload -Parent ID : spiffe://example.org/parent -Revision : 0 -Downstream : true -X509-SVID TTL : 60 -JWT-SVID TTL : default -Expiration time : %s -Selector : zebra:zebra:2000 -Selector : alpha:alpha:2000 -FederatesWith : spiffe://domaina.test -FederatesWith : spiffe://domainb.test -DNS name : unu1000 -DNS name : ung1000 -Admin : true -StoreSvid : true - -`, time.Unix(1552410266, 0).UTC()), - expOutJSON: `{ - "results": [ - { - "status": { - "code": 0, - "message": "OK" - }, - "entry": { - "id": "entry-id", - "spiffe_id": { - "trust_domain": 
"example.org", - "path": "/workload" - }, - "parent_id": { - "trust_domain": "example.org", - "path": "/parent" - }, - "selectors": [ - { - "type": "zebra", - "value": "zebra:2000" - }, - { - "type": "alpha", - "value": "alpha:2000" - } - ], - "x509_svid_ttl": 60, - "federates_with": [ - "spiffe://domaina.test", - "spiffe://domainb.test" - ], - "hint": "", - "admin": true, - "created_at": "1547583197", - "downstream": true, - "expires_at": "1552410266", - "dns_names": [ - "unu1000", - "ung1000" - ], - "revision_number": "0", - "store_svid": true, - "jwt_svid_ttl": 0 - } - } - ] -}`, - }, - { - name: "Create succeeds using deprecated command line arguments", - args: []string{ - "-spiffeID", "spiffe://example.org/workload", - "-parentID", "spiffe://example.org/parent", - "-selector", "zebra:zebra:2000", - "-selector", "alpha:alpha:2000", - "-ttl", "60", - "-federatesWith", "spiffe://domaina.test", - "-federatesWith", "spiffe://domainb.test", - "-admin", - "-entryExpiry", "1552410266", - "-dns", "unu1000", - "-dns", "ung1000", - "-downstream", - "-storeSVID", - }, - expReq: &entryv1.BatchCreateEntryRequest{ - Entries: []*types.Entry{ - { - SpiffeId: &types.SPIFFEID{TrustDomain: "example.org", Path: "/workload"}, - ParentId: &types.SPIFFEID{TrustDomain: "example.org", Path: "/parent"}, - Selectors: []*types.Selector{ - {Type: "zebra", Value: "zebra:2000"}, - {Type: "alpha", Value: "alpha:2000"}, - }, - X509SvidTtl: 60, - FederatesWith: []string{"spiffe://domaina.test", "spiffe://domainb.test"}, - Admin: true, - ExpiresAt: 1552410266, - DnsNames: []string{"unu1000", "ung1000"}, - Downstream: true, - StoreSvid: true, - }, - }, - }, - fakeResp: fakeRespOKFromCmd2, + fakeResp: fakeRespOKFromCmdWithoutJwtTtl, expOutPretty: fmt.Sprintf(`Entry ID : entry-id SPIFFE ID : spiffe://example.org/workload Parent ID : spiffe://example.org/parent diff --git a/cmd/spire-server/cli/entry/update.go b/cmd/spire-server/cli/entry/update.go index e2a22a5b92d..4b1503819ce 100644 --- 
a/cmd/spire-server/cli/entry/update.go +++ b/cmd/spire-server/cli/entry/update.go @@ -44,9 +44,6 @@ type updateCommand struct { // Whether or not the entry is for a downstream SPIRE server downstream bool - // TTL for certificates issued to this workload - ttl int - // TTL for x509 SVIDs issued to this workload x509SvidTTL int @@ -88,9 +85,8 @@ func (c *updateCommand) AppendFlags(f *flag.FlagSet) { f.StringVar(&c.entryID, "entryID", "", "The Registration Entry ID of the record to update") f.StringVar(&c.parentID, "parentID", "", "The SPIFFE ID of this record's parent") f.StringVar(&c.spiffeID, "spiffeID", "", "The SPIFFE ID that this record represents") - f.IntVar(&c.ttl, "ttl", 0, "The lifetime, in seconds, for SVIDs issued based on this registration entry. This flag is deprecated in favor of x509SVIDTTL and jwtSVIDTTL and will be removed in a future version") - f.IntVar(&c.x509SvidTTL, "x509SVIDTTL", 0, "The lifetime, in seconds, for x509-SVIDs issued based on this registration entry. Overrides ttl flag") - f.IntVar(&c.jwtSvidTTL, "jwtSVIDTTL", 0, "The lifetime, in seconds, for JWT-SVIDs issued based on this registration entry. Overrides ttl flag") + f.IntVar(&c.x509SvidTTL, "x509SVIDTTL", 0, "The lifetime, in seconds, for x509-SVIDs issued based on this registration entry.") + f.IntVar(&c.jwtSvidTTL, "jwtSVIDTTL", 0, "The lifetime, in seconds, for JWT-SVIDs issued based on this registration entry.") f.StringVar(&c.path, "data", "", "Path to a file containing registration JSON (optional). If set to '-', read the JSON from stdin.") f.Var(&c.selectors, "selector", "A colon-delimited type:value selector. Can be used more than once") f.Var(&c.federatesWith, "federatesWith", "SPIFFE ID of a trust domain to federate with. 
Can be used more than once") @@ -151,10 +147,6 @@ func (c *updateCommand) validate() (err error) { return errors.New("a SPIFFE ID is required") } - if c.ttl < 0 { - return errors.New("a positive TTL is required") - } - if c.x509SvidTTL < 0 { return errors.New("a positive x509-SVID TTL is required") } @@ -163,10 +155,6 @@ func (c *updateCommand) validate() (err error) { return errors.New("a positive JWT-SVID TTL is required") } - if c.ttl > 0 && (c.x509SvidTTL > 0 || c.jwtSvidTTL > 0) { - return errors.New("use x509SVIDTTL and jwtSVIDTTL flags or the deprecated ttl flag") - } - return nil } @@ -193,18 +181,6 @@ func (c *updateCommand) parseConfig() ([]*types.Entry, error) { Hint: c.hint, } - // c.ttl is deprecated but usable if the new c.x509Svid field is not used. - // c.ttl should not be used to set the jwtSVIDTTL value because the previous - // behavior was to have a hard-coded 5 minute JWT TTL no matter what the value - // of ttl was set to. - // validate(...) ensures that either the new fields or the deprecated field is - // used, but never a mixture. 
- // - // https://github.com/spiffe/spire/issues/2700 - if e.X509SvidTtl == 0 { - e.X509SvidTtl = int32(c.ttl) - } - selectors := []*types.Selector{} for _, s := range c.selectors { cs, err := util.ParseSelector(s) diff --git a/cmd/spire-server/cli/entry/update_test.go b/cmd/spire-server/cli/entry/update_test.go index 767ff72e458..0befd968514 100644 --- a/cmd/spire-server/cli/entry/update_test.go +++ b/cmd/spire-server/cli/entry/update_test.go @@ -321,24 +321,6 @@ func TestUpdate(t *testing.T) { JwtSvidTtl: 300, } - entry5 := &types.Entry{ - Id: "entry-id", - SpiffeId: &types.SPIFFEID{TrustDomain: "example.org", Path: "/workload"}, - ParentId: &types.SPIFFEID{TrustDomain: "example.org", Path: "/parent"}, - Selectors: []*types.Selector{ - {Type: "zebra", Value: "zebra:2000"}, - {Type: "alpha", Value: "alpha:2000"}, - }, - X509SvidTtl: 60, - JwtSvidTtl: 0, - FederatesWith: []string{"spiffe://domaina.test", "spiffe://domainb.test"}, - Admin: true, - ExpiresAt: 1552410266, - DnsNames: []string{"unu1000", "ung1000"}, - Downstream: true, - Hint: "external", - } - entry2Resp := proto.Clone(entry2).(*types.Entry) entry2Resp.CreatedAt = 1547583197 entry3Resp := proto.Clone(entry3).(*types.Entry) @@ -416,30 +398,6 @@ func TestUpdate(t *testing.T) { expErrPretty: "Error: selector \"unix\" must be formatted as type:value\n", expErrJSON: "Error: selector \"unix\" must be formatted as type:value\n", }, - { - name: "Negative TTL", - args: []string{"-entryID", "entry-id", "-selector", "unix", "-parentID", "spiffe://example.org/parent", "-spiffeID", "spiffe://example.org/workload", "-ttl", "-10"}, - expErrPretty: "Error: a positive TTL is required\n", - expErrJSON: "Error: a positive TTL is required\n", - }, - { - name: "Invalid TTL and X509SvidTtl", - args: []string{"-entryID", "entry-id", "-selector", "unix", "-parentID", "spiffe://example.org/parent", "-spiffeID", "spiffe://example.org/workload", "-ttl", "10", "-x509SVIDTTL", "20"}, - expErrPretty: "Error: use x509SVIDTTL and 
jwtSVIDTTL flags or the deprecated ttl flag\n", - expErrJSON: "Error: use x509SVIDTTL and jwtSVIDTTL flags or the deprecated ttl flag\n", - }, - { - name: "Invalid TTL and JwtSvidTtl", - args: []string{"-entryID", "entry-id", "-selector", "unix", "-parentID", "spiffe://example.org/parent", "-spiffeID", "spiffe://example.org/workload", "-ttl", "10", "-jwtSVIDTTL", "20"}, - expErrPretty: "Error: use x509SVIDTTL and jwtSVIDTTL flags or the deprecated ttl flag\n", - expErrJSON: "Error: use x509SVIDTTL and jwtSVIDTTL flags or the deprecated ttl flag\n", - }, - { - name: "Invalid TTL and both X509SvidTtl and JwtSvidTtl", - args: []string{"-entryID", "entry-id", "-selector", "unix", "-parentID", "spiffe://example.org/parent", "-spiffeID", "spiffe://example.org/workload", "-ttl", "10", "-x509SVIDTTL", "20", "-jwtSVIDTTL", "30"}, - expErrPretty: "Error: use x509SVIDTTL and jwtSVIDTTL flags or the deprecated ttl flag\n", - expErrJSON: "Error: use x509SVIDTTL and jwtSVIDTTL flags or the deprecated ttl flag\n", - }, { name: "Server error", args: []string{"-entryID", "entry-id", "-spiffeID", "spiffe://example.org/workload", "-parentID", "spiffe://example.org/parent", "-selector", "unix:uid:1"}, @@ -495,58 +453,6 @@ DNS name : ung1000 Admin : true Hint : external -`, time.Unix(1552410266, 0).UTC()), - expOutJSON: fmt.Sprintf(`{ - "results": [ - { - "status": { - "code": 0, - "message": "OK" - }, - "entry": %s - } - ] -}`, entry0AdminJSON), - }, - { - name: "Update succeeds using deprecated command line arguments", - args: []string{ - "-entryID", "entry-id", - "-spiffeID", "spiffe://example.org/workload", - "-parentID", "spiffe://example.org/parent", - "-selector", "zebra:zebra:2000", - "-selector", "alpha:alpha:2000", - "-ttl", "60", - "-federatesWith", "spiffe://domaina.test", - "-federatesWith", "spiffe://domainb.test", - "-admin", - "-entryExpiry", "1552410266", - "-dns", "unu1000", - "-dns", "ung1000", - "-downstream", - "-hint", "external", - }, - expReq: 
&entryv1.BatchUpdateEntryRequest{ - Entries: []*types.Entry{entry5}, - }, - fakeResp: fakeRespOKFromCmd, - expOutPretty: fmt.Sprintf(`Entry ID : entry-id -SPIFFE ID : spiffe://example.org/workload -Parent ID : spiffe://example.org/parent -Revision : 0 -Downstream : true -X509-SVID TTL : 60 -JWT-SVID TTL : 30 -Expiration time : %s -Selector : zebra:zebra:2000 -Selector : alpha:alpha:2000 -FederatesWith : spiffe://domaina.test -FederatesWith : spiffe://domainb.test -DNS name : unu1000 -DNS name : ung1000 -Admin : true -Hint : external - `, time.Unix(1552410266, 0).UTC()), expOutJSON: fmt.Sprintf(`{ "results": [ diff --git a/cmd/spire-server/cli/entry/util_posix_test.go b/cmd/spire-server/cli/entry/util_posix_test.go index 7b04cb3f96d..2ac95988733 100644 --- a/cmd/spire-server/cli/entry/util_posix_test.go +++ b/cmd/spire-server/cli/entry/util_posix_test.go @@ -21,7 +21,7 @@ const ( -hint string The entry hint, used to disambiguate entries with the same SPIFFE ID -jwtSVIDTTL int - The lifetime, in seconds, for JWT-SVIDs issued based on this registration entry. Overrides ttl flag + The lifetime, in seconds, for JWT-SVIDs issued based on this registration entry. -node If set, this entry will be applied to matching nodes rather than workloads -output value @@ -36,10 +36,8 @@ const ( The SPIFFE ID that this record represents -storeSVID A boolean value that, when set, indicates that the resulting issued SVID from this entry must be stored through an SVIDStore plugin - -ttl int - The lifetime, in seconds, for SVIDs issued based on this registration entry. This flag is deprecated in favor of x509SVIDTTL and jwtSVIDTTL and will be removed in a future version -x509SVIDTTL int - The lifetime, in seconds, for x509-SVIDs issued based on this registration entry. Overrides ttl flag + The lifetime, in seconds, for x509-SVIDs issued based on this registration entry. 
` showUsage = `Usage of entry show: -downstream @@ -83,7 +81,7 @@ const ( -hint string The entry hint, used to disambiguate entries with the same SPIFFE ID -jwtSVIDTTL int - The lifetime, in seconds, for JWT-SVIDs issued based on this registration entry. Overrides ttl flag + The lifetime, in seconds, for JWT-SVIDs issued based on this registration entry. -output value Desired output format (pretty, json); default: pretty. -parentID string @@ -96,10 +94,8 @@ const ( The SPIFFE ID that this record represents -storeSVID A boolean value that, when set, indicates that the resulting issued SVID from this entry must be stored through an SVIDStore plugin - -ttl int - The lifetime, in seconds, for SVIDs issued based on this registration entry. This flag is deprecated in favor of x509SVIDTTL and jwtSVIDTTL and will be removed in a future version -x509SVIDTTL int - The lifetime, in seconds, for x509-SVIDs issued based on this registration entry. Overrides ttl flag + The lifetime, in seconds, for x509-SVIDs issued based on this registration entry. ` deleteUsage = `Usage of entry delete: -entryID string diff --git a/cmd/spire-server/cli/entry/util_windows_test.go b/cmd/spire-server/cli/entry/util_windows_test.go index 18f5c88af42..06fd7e49422 100644 --- a/cmd/spire-server/cli/entry/util_windows_test.go +++ b/cmd/spire-server/cli/entry/util_windows_test.go @@ -21,7 +21,7 @@ const ( -hint string The entry hint, used to disambiguate entries with the same SPIFFE ID -jwtSVIDTTL int - The lifetime, in seconds, for JWT-SVIDs issued based on this registration entry. Overrides ttl flag + The lifetime, in seconds, for JWT-SVIDs issued based on this registration entry. 
-namedPipeName string Pipe name of the SPIRE Server API named pipe (default "\\spire-server\\private\\api") -node @@ -36,10 +36,8 @@ const ( The SPIFFE ID that this record represents -storeSVID A boolean value that, when set, indicates that the resulting issued SVID from this entry must be stored through an SVIDStore plugin - -ttl int - The lifetime, in seconds, for SVIDs issued based on this registration entry. This flag is deprecated in favor of x509SVIDTTL and jwtSVIDTTL and will be removed in a future version -x509SVIDTTL int - The lifetime, in seconds, for x509-SVIDs issued based on this registration entry. Overrides ttl flag + The lifetime, in seconds, for x509-SVIDs issued based on this registration entry. ` showUsage = `Usage of entry show: -downstream @@ -83,7 +81,7 @@ const ( -hint string The entry hint, used to disambiguate entries with the same SPIFFE ID -jwtSVIDTTL int - The lifetime, in seconds, for JWT-SVIDs issued based on this registration entry. Overrides ttl flag + The lifetime, in seconds, for JWT-SVIDs issued based on this registration entry. -namedPipeName string Pipe name of the SPIRE Server API named pipe (default "\\spire-server\\private\\api") -output value @@ -96,10 +94,8 @@ const ( The SPIFFE ID that this record represents -storeSVID A boolean value that, when set, indicates that the resulting issued SVID from this entry must be stored through an SVIDStore plugin - -ttl int - The lifetime, in seconds, for SVIDs issued based on this registration entry. This flag is deprecated in favor of x509SVIDTTL and jwtSVIDTTL and will be removed in a future version -x509SVIDTTL int - The lifetime, in seconds, for x509-SVIDs issued based on this registration entry. Overrides ttl flag + The lifetime, in seconds, for x509-SVIDs issued based on this registration entry. 
` deleteUsage = `Usage of entry delete: -entryID string diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_activate.go b/cmd/spire-server/cli/localauthority/jwt/jwt_activate.go new file mode 100644 index 00000000000..289f8f2cfd2 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_activate.go @@ -0,0 +1,88 @@ +package jwt + +import ( + "context" + "errors" + "flag" + "fmt" + "time" + + "github.com/mitchellh/cli" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + "github.com/spiffe/spire/cmd/spire-server/util" + commoncli "github.com/spiffe/spire/pkg/common/cli" + "github.com/spiffe/spire/pkg/common/cliprinter" +) + +// NewJWTActivateCommand creates a new "jwt activate" subcommand for "localauthority" command. +func NewJWTActivateCommand() cli.Command { + return NewJWTActivateCommandWithEnv(commoncli.DefaultEnv) +} + +// NewJWTActivateCommandWithEnv creates a new "jwt activate" subcommand for "localauthority" command +// using the environment specified +func NewJWTActivateCommandWithEnv(env *commoncli.Env) cli.Command { + return util.AdaptCommand(env, &jwtActivateCommand{env: env}) +} + +type jwtActivateCommand struct { + authorityID string + printer cliprinter.Printer + env *commoncli.Env +} + +func (c *jwtActivateCommand) Name() string { + return "localauthority jwt activate" +} + +func (*jwtActivateCommand) Synopsis() string { + return "Activates a prepared JWT authority for use, which will cause it to be used for all JWT signing operations serviced by this server going forward" +} + +func (c *jwtActivateCommand) AppendFlags(f *flag.FlagSet) { + f.StringVar(&c.authorityID, "authorityID", "", "The authority ID of the JWT authority to activate") + cliprinter.AppendFlagWithCustomPretty(&c.printer, f, c.env, prettyPrintJWTActivate) +} + +// Run executes all logic associated with a single invocation of the +// `spire-server localauthority jwt activate` CLI command +func (c *jwtActivateCommand) Run(ctx 
context.Context, _ *commoncli.Env, serverClient util.ServerClient) error { + if err := c.validate(); err != nil { + return err + } + + client := serverClient.NewLocalAuthorityClient() + resp, err := client.ActivateJWTAuthority(ctx, &localauthorityv1.ActivateJWTAuthorityRequest{ + AuthorityId: c.authorityID, + }) + if err != nil { + return fmt.Errorf("could not activate JWT authority: %w", err) + } + + return c.printer.PrintProto(resp) +} + +func (c *jwtActivateCommand) validate() error { + if c.authorityID == "" { + return errors.New("an authority ID is required") + } + + return nil +} + +func prettyPrintJWTActivate(env *commoncli.Env, results ...any) error { + r, ok := results[0].(*localauthorityv1.ActivateJWTAuthorityResponse) + if !ok { + return errors.New("internal error: cli printer; please report this bug") + } + + env.Println("Activated JWT authority:") + if r.ActivatedAuthority == nil { + return errors.New("internal error: expected to have activated JWT authority information") + } + + env.Printf(" Authority ID: %s\n", r.ActivatedAuthority.AuthorityId) + env.Printf(" Expires at: %s\n", time.Unix(r.ActivatedAuthority.ExpiresAt, 0).UTC()) + + return nil +} diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_activate_posix_test.go b/cmd/spire-server/cli/localauthority/jwt/jwt_activate_posix_test.go new file mode 100644 index 00000000000..0c31fc4d2bc --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_activate_posix_test.go @@ -0,0 +1,14 @@ +//go:build !windows + +package jwt_test + +var ( + jwtActivateUsage = `Usage of localauthority jwt activate: + -authorityID string + The authority ID of the JWT authority to activate + -output value + Desired output format (pretty, json); default: pretty. 
+ -socketPath string + Path to the SPIRE Server API socket (default "/tmp/spire-server/private/api.sock") +` +) diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_activate_test.go b/cmd/spire-server/cli/localauthority/jwt/jwt_activate_test.go new file mode 100644 index 00000000000..89998cfe7b0 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_activate_test.go @@ -0,0 +1,89 @@ +package jwt_test + +import ( + "fmt" + "testing" + + "github.com/gogo/status" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + authority_common "github.com/spiffe/spire/cmd/spire-server/cli/authoritycommon" + "github.com/spiffe/spire/cmd/spire-server/cli/common" + "github.com/spiffe/spire/cmd/spire-server/cli/localauthority/jwt" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" +) + +func TestJWTActivateHelp(t *testing.T) { + test := authority_common.SetupTest(t, jwt.NewJWTActivateCommandWithEnv) + + test.Client.Help() + require.Equal(t, jwtActivateUsage, test.Stderr.String()) +} + +func TestJWTActivateSynopsys(t *testing.T) { + test := authority_common.SetupTest(t, jwt.NewJWTActivateCommandWithEnv) + require.Equal(t, "Activates a prepared JWT authority for use, which will cause it to be used for all JWT signing operations serviced by this server going forward", test.Client.Synopsis()) +} + +func TestJWTActivate(t *testing.T) { + for _, tt := range []struct { + name string + args []string + expectReturnCode int + expectStdoutPretty string + expectStdoutJSON string + expectStderr string + serverErr error + active, prepared *localauthorityv1.AuthorityState + }{ + { + name: "success", + expectReturnCode: 0, + args: []string{"-authorityID", "prepared-id"}, + active: &localauthorityv1.AuthorityState{ + AuthorityId: "active-id", + ExpiresAt: 1001, + }, + prepared: &localauthorityv1.AuthorityState{ + AuthorityId: "prepared-id", + ExpiresAt: 1002, + }, + expectStdoutPretty: "Activated JWT authority:\n Authority 
ID: active-id\n Expires at: 1970-01-01 00:16:41 +0000 UTC\n", + expectStdoutJSON: `{"activated_authority":{"authority_id":"active-id","expires_at":"1001"}}`, + }, + { + name: "no authority id", + expectReturnCode: 1, + expectStderr: "Error: an authority ID is required\n", + }, + { + name: "wrong UDS path", + args: []string{common.AddrArg, common.AddrValue}, + expectReturnCode: 1, + expectStderr: common.AddrError, + }, + { + name: "server error", + args: []string{"-authorityID", "prepared-id"}, + serverErr: status.Error(codes.Internal, "internal server error"), + expectReturnCode: 1, + expectStderr: "Error: could not activate JWT authority: rpc error: code = Internal desc = internal server error\n", + }, + } { + for _, format := range authority_common.AvailableFormats { + t.Run(fmt.Sprintf("%s using %s format", tt.name, format), func(t *testing.T) { + test := authority_common.SetupTest(t, jwt.NewJWTActivateCommandWithEnv) + test.Server.ActiveJWT = tt.active + test.Server.Err = tt.serverErr + args := tt.args + args = append(args, "-output", format) + + returnCode := test.Client.Run(append(test.Args, args...)) + + authority_common.RequireOutputBasedOnFormat(t, format, test.Stdout.String(), tt.expectStdoutPretty, tt.expectStdoutJSON) + require.Equal(t, tt.expectStderr, test.Stderr.String()) + require.Equal(t, tt.expectReturnCode, returnCode) + }) + } + } +} diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_activate_windows_test.go b/cmd/spire-server/cli/localauthority/jwt/jwt_activate_windows_test.go new file mode 100644 index 00000000000..382b0df1bbb --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_activate_windows_test.go @@ -0,0 +1,14 @@ +//go:build windows + +package jwt_test + +var ( + jwtActivateUsage = `Usage of localauthority jwt activate: + -authorityID string + The authority ID of the JWT authority to activate + -namedPipeName string + Pipe name of the SPIRE Server API named pipe (default "\\spire-server\\private\\api") + -output value + 
Desired output format (pretty, json); default: pretty. +` +) diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_prepare.go b/cmd/spire-server/cli/localauthority/jwt/jwt_prepare.go new file mode 100644 index 00000000000..b10a98233f9 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_prepare.go @@ -0,0 +1,72 @@ +package jwt + +import ( + "context" + "errors" + "flag" + "fmt" + "time" + + "github.com/mitchellh/cli" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + "github.com/spiffe/spire/cmd/spire-server/util" + commoncli "github.com/spiffe/spire/pkg/common/cli" + "github.com/spiffe/spire/pkg/common/cliprinter" +) + +// NewJWTPrepareCommand creates a new "jwt prepare" subcommand for "localauthority" command. +func NewJWTPrepareCommand() cli.Command { + return NewJWTPrepareCommandWithEnv(commoncli.DefaultEnv) +} + +// NewJWTPrepareCommandWithEnv creates a new "jwt prepare" subcommand for "localauthority" command +// using the environment specified +func NewJWTPrepareCommandWithEnv(env *commoncli.Env) cli.Command { + return util.AdaptCommand(env, &jwtPrepareCommand{env: env}) +} + +type jwtPrepareCommand struct { + printer cliprinter.Printer + env *commoncli.Env +} + +func (c *jwtPrepareCommand) Name() string { + return "localauthority jwt prepare" +} + +func (*jwtPrepareCommand) Synopsis() string { + return "Prepares a new JWT authority for use by generating a new key and injecting it into the bundle" +} + +func (c *jwtPrepareCommand) AppendFlags(f *flag.FlagSet) { + cliprinter.AppendFlagWithCustomPretty(&c.printer, f, c.env, prettyPrintJWTPrepare) +} + +// Run executes all logic associated with a single invocation of the +// `spire-server localauthority jwt prepare` CLI command +func (c *jwtPrepareCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient util.ServerClient) error { + client := serverClient.NewLocalAuthorityClient() + resp, err := client.PrepareJWTAuthority(ctx, 
&localauthorityv1.PrepareJWTAuthorityRequest{}) + if err != nil { + return fmt.Errorf("could not prepare JWT authority: %w", err) + } + + return c.printer.PrintProto(resp) +} + +func prettyPrintJWTPrepare(env *commoncli.Env, results ...any) error { + r, ok := results[0].(*localauthorityv1.PrepareJWTAuthorityResponse) + if !ok { + return errors.New("internal error: cli printer; please report this bug") + } + + env.Println("Prepared JWT authority:") + if r.PreparedAuthority == nil { + return errors.New("internal error: expected to have prepared JWT authority information") + } + + env.Printf(" Authority ID: %s\n", r.PreparedAuthority.AuthorityId) + env.Printf(" Expires at: %s\n", time.Unix(r.PreparedAuthority.ExpiresAt, 0).UTC()) + + return nil +} diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_prepare_posix_test.go b/cmd/spire-server/cli/localauthority/jwt/jwt_prepare_posix_test.go new file mode 100644 index 00000000000..c8bafb0e224 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_prepare_posix_test.go @@ -0,0 +1,12 @@ +//go:build !windows + +package jwt_test + +var ( + jwtPrepareUsage = `Usage of localauthority jwt prepare: + -output value + Desired output format (pretty, json); default: pretty. 
+ -socketPath string + Path to the SPIRE Server API socket (default "/tmp/spire-server/private/api.sock") +` +) diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_prepare_test.go b/cmd/spire-server/cli/localauthority/jwt/jwt_prepare_test.go new file mode 100644 index 00000000000..77d10299e60 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_prepare_test.go @@ -0,0 +1,78 @@ +package jwt_test + +import ( + "fmt" + "testing" + + "github.com/gogo/status" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + authority_common "github.com/spiffe/spire/cmd/spire-server/cli/authoritycommon" + "github.com/spiffe/spire/cmd/spire-server/cli/common" + "github.com/spiffe/spire/cmd/spire-server/cli/localauthority/jwt" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" +) + +func TestJWTPrepareHelp(t *testing.T) { + test := authority_common.SetupTest(t, jwt.NewJWTPrepareCommandWithEnv) + + test.Client.Help() + require.Equal(t, jwtPrepareUsage, test.Stderr.String()) +} + +func TestJWTPrepareSynopsys(t *testing.T) { + test := authority_common.SetupTest(t, jwt.NewJWTPrepareCommandWithEnv) + require.Equal(t, "Prepares a new JWT authority for use by generating a new key and injecting it into the bundle", test.Client.Synopsis()) +} + +func TestJWTPrepare(t *testing.T) { + for _, tt := range []struct { + name string + args []string + expectReturnCode int + expectStdoutPretty string + expectStdoutJSON string + expectStderr string + serverErr error + prepared *localauthorityv1.AuthorityState + }{ + { + name: "success", + expectReturnCode: 0, + expectStdoutPretty: "Prepared JWT authority:\n Authority ID: prepared-id\n Expires at: 1970-01-01 00:16:42 +0000 UTC\n", + expectStdoutJSON: `{"prepared_authority":{"authority_id":"prepared-id","expires_at":"1002"}}`, + prepared: &localauthorityv1.AuthorityState{ + AuthorityId: "prepared-id", + ExpiresAt: 1002, + }, + }, + { + name: "wrong UDS path", + args: 
[]string{common.AddrArg, common.AddrValue}, + expectReturnCode: 1, + expectStderr: common.AddrError, + }, + { + name: "server error", + serverErr: status.Error(codes.Internal, "internal server error"), + expectReturnCode: 1, + expectStderr: "Error: could not prepare JWT authority: rpc error: code = Internal desc = internal server error\n", + }, + } { + for _, format := range authority_common.AvailableFormats { + t.Run(fmt.Sprintf("%s using %s format", tt.name, format), func(t *testing.T) { + test := authority_common.SetupTest(t, jwt.NewJWTPrepareCommandWithEnv) + test.Server.PreparedJWT = tt.prepared + test.Server.Err = tt.serverErr + args := tt.args + args = append(args, "-output", format) + + returnCode := test.Client.Run(append(test.Args, args...)) + + authority_common.RequireOutputBasedOnFormat(t, format, test.Stdout.String(), tt.expectStdoutPretty, tt.expectStdoutJSON) + require.Equal(t, tt.expectStderr, test.Stderr.String()) + require.Equal(t, tt.expectReturnCode, returnCode) + }) + } + } +} diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_prepare_windows_test.go b/cmd/spire-server/cli/localauthority/jwt/jwt_prepare_windows_test.go new file mode 100644 index 00000000000..bd54e03f072 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_prepare_windows_test.go @@ -0,0 +1,12 @@ +//go:build windows + +package jwt_test + +var ( + jwtPrepareUsage = `Usage of localauthority jwt prepare: + -namedPipeName string + Pipe name of the SPIRE Server API named pipe (default "\\spire-server\\private\\api") + -output value + Desired output format (pretty, json); default: pretty. 
+` ) diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_revoke.go b/cmd/spire-server/cli/localauthority/jwt/jwt_revoke.go new file mode 100644 index 00000000000..2f5fe49886a --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_revoke.go @@ -0,0 +1,88 @@ +package jwt + +import ( + "context" + "errors" + "flag" + "fmt" + "time" + + "github.com/mitchellh/cli" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + "github.com/spiffe/spire/cmd/spire-server/util" + commoncli "github.com/spiffe/spire/pkg/common/cli" + "github.com/spiffe/spire/pkg/common/cliprinter" +) + +// NewJWTRevokeCommand creates a new "jwt revoke" subcommand for "localauthority" command. +func NewJWTRevokeCommand() cli.Command { + return NewJWTRevokeCommandWithEnv(commoncli.DefaultEnv) +} + +// NewJWTRevokeCommandWithEnv creates a new "jwt revoke" subcommand for "localauthority" command +// using the environment specified +func NewJWTRevokeCommandWithEnv(env *commoncli.Env) cli.Command { + return util.AdaptCommand(env, &jwtRevokeCommand{env: env}) +} + +type jwtRevokeCommand struct { + authorityID string + printer cliprinter.Printer + env *commoncli.Env +} + +func (c *jwtRevokeCommand) Name() string { + return "localauthority jwt revoke" +} + +func (*jwtRevokeCommand) Synopsis() string { + return "Revokes the previously active JWT authority by removing it from the bundle and propagating this update throughout the cluster" +} + +func (c *jwtRevokeCommand) AppendFlags(f *flag.FlagSet) { + f.StringVar(&c.authorityID, "authorityID", "", "The authority ID of the JWT authority to revoke") + cliprinter.AppendFlagWithCustomPretty(&c.printer, f, c.env, prettyPrintJWTRevoke) +} + +// Run executes all logic associated with a single invocation of the +// `spire-server localauthority jwt revoke` CLI command +func (c *jwtRevokeCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient util.ServerClient) error { + if err := c.validate(); err != 
nil { + return err + } + + client := serverClient.NewLocalAuthorityClient() + resp, err := client.RevokeJWTAuthority(ctx, &localauthorityv1.RevokeJWTAuthorityRequest{ + AuthorityId: c.authorityID, + }) + if err != nil { + return fmt.Errorf("could not revoke JWT authority: %w", err) + } + + return c.printer.PrintProto(resp) +} + +func (c *jwtRevokeCommand) validate() error { + if c.authorityID == "" { + return errors.New("an authority ID is required") + } + + return nil +} + +func prettyPrintJWTRevoke(env *commoncli.Env, results ...any) error { + r, ok := results[0].(*localauthorityv1.RevokeJWTAuthorityResponse) + if !ok { + return errors.New("internal error: cli printer; please report this bug") + } + + env.Println("Revoked JWT authority:") + if r.RevokedAuthority == nil { + return errors.New("internal error: expected to have revoked JWT authority information") + } + + env.Printf(" Authority ID: %s\n", r.RevokedAuthority.AuthorityId) + env.Printf(" Expires at: %s\n", time.Unix(r.RevokedAuthority.ExpiresAt, 0).UTC()) + + return nil +} diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_revoke_posix_test.go b/cmd/spire-server/cli/localauthority/jwt/jwt_revoke_posix_test.go new file mode 100644 index 00000000000..748e1950207 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_revoke_posix_test.go @@ -0,0 +1,14 @@ +//go:build !windows + +package jwt_test + +var ( + jwtRevokeUsage = `Usage of localauthority jwt revoke: + -authorityID string + The authority ID of the JWT authority to revoke + -output value + Desired output format (pretty, json); default: pretty. 
+ -socketPath string + Path to the SPIRE Server API socket (default "/tmp/spire-server/private/api.sock") +` +) diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_revoke_test.go b/cmd/spire-server/cli/localauthority/jwt/jwt_revoke_test.go new file mode 100644 index 00000000000..9897a59dc1b --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_revoke_test.go @@ -0,0 +1,85 @@ +package jwt_test + +import ( + "fmt" + "testing" + + "github.com/gogo/status" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + authority_common "github.com/spiffe/spire/cmd/spire-server/cli/authoritycommon" + "github.com/spiffe/spire/cmd/spire-server/cli/common" + "github.com/spiffe/spire/cmd/spire-server/cli/localauthority/jwt" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" +) + +func TestJWTRevokeHelp(t *testing.T) { + test := authority_common.SetupTest(t, jwt.NewJWTRevokeCommandWithEnv) + + test.Client.Help() + require.Equal(t, jwtRevokeUsage, test.Stderr.String()) +} + +func TestJWTRevokeSynopsys(t *testing.T) { + test := authority_common.SetupTest(t, jwt.NewJWTRevokeCommandWithEnv) + require.Equal(t, "Revokes the previously active JWT authority by removing it from the bundle and propagating this update throughout the cluster", test.Client.Synopsis()) +} + +func TestJWTRevoke(t *testing.T) { + for _, tt := range []struct { + name string + args []string + expectReturnCode int + expectStdoutPretty string + expectStdoutJSON string + expectStderr string + serverErr error + revoked *localauthorityv1.AuthorityState + }{ + { + name: "success", + expectReturnCode: 0, + args: []string{"-authorityID", "prepared-id"}, + revoked: &localauthorityv1.AuthorityState{ + AuthorityId: "revoked-id", + ExpiresAt: 1001, + }, + expectStdoutPretty: "Revoked JWT authority:\n Authority ID: revoked-id\n Expires at: 1970-01-01 00:16:41 +0000 UTC\n", + expectStdoutJSON: 
`{"revoked_authority":{"authority_id":"revoked-id","expires_at":"1001"}}`, + }, + { + name: "no authority id", + expectReturnCode: 1, + expectStderr: "Error: an authority ID is required\n", + }, + { + name: "wrong UDS path", + args: []string{common.AddrArg, common.AddrValue}, + expectReturnCode: 1, + expectStderr: common.AddrError, + }, + { + name: "server error", + args: []string{"-authorityID", "tainted-id"}, + serverErr: status.Error(codes.Internal, "internal server error"), + expectReturnCode: 1, + expectStderr: "Error: could not revoke JWT authority: rpc error: code = Internal desc = internal server error\n", + }, + } { + for _, format := range authority_common.AvailableFormats { + t.Run(fmt.Sprintf("%s using %s format", tt.name, format), func(t *testing.T) { + test := authority_common.SetupTest(t, jwt.NewJWTRevokeCommandWithEnv) + test.Server.RevokedJWT = tt.revoked + test.Server.Err = tt.serverErr + args := tt.args + args = append(args, "-output", format) + + returnCode := test.Client.Run(append(test.Args, args...)) + + authority_common.RequireOutputBasedOnFormat(t, format, test.Stdout.String(), tt.expectStdoutPretty, tt.expectStdoutJSON) + require.Equal(t, tt.expectStderr, test.Stderr.String()) + require.Equal(t, tt.expectReturnCode, returnCode) + }) + } + } +} diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_revoke_windows_test.go b/cmd/spire-server/cli/localauthority/jwt/jwt_revoke_windows_test.go new file mode 100644 index 00000000000..f0f76a9b0ba --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_revoke_windows_test.go @@ -0,0 +1,14 @@ +//go:build windows + +package jwt_test + +var ( + jwtRevokeUsage = `Usage of localauthority jwt revoke: + -authorityID string + The authority ID of the JWT authority to revoke + -namedPipeName string + Pipe name of the SPIRE Server API named pipe (default "\\spire-server\\private\\api") + -output value + Desired output format (pretty, json); default: pretty. 
+` +) diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_show.go b/cmd/spire-server/cli/localauthority/jwt/jwt_show.go new file mode 100644 index 00000000000..cee9cf37e2d --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_show.go @@ -0,0 +1,87 @@ +package jwt + +import ( + "context" + "errors" + "flag" + "time" + + "github.com/mitchellh/cli" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + "github.com/spiffe/spire/cmd/spire-server/util" + commoncli "github.com/spiffe/spire/pkg/common/cli" + "github.com/spiffe/spire/pkg/common/cliprinter" +) + +// NewJWTShowCommand creates a new "jwt show" subcommand for "localauthority" command. +func NewJWTShowCommand() cli.Command { + return NewJWTShowCommandWithEnv(commoncli.DefaultEnv) +} + +// NewJWTShowCommandWithEnv creates a new "jwt show" subcommand for "localauthority" command +// using the environment specified +func NewJWTShowCommandWithEnv(env *commoncli.Env) cli.Command { + return util.AdaptCommand(env, &jwtShowCommand{env: env}) +} + +type jwtShowCommand struct { + printer cliprinter.Printer + + env *commoncli.Env +} + +func (c *jwtShowCommand) Name() string { + return "localauthority jwt show" +} + +func (*jwtShowCommand) Synopsis() string { + return "Shows the local JWT authorities" +} + +func (c *jwtShowCommand) AppendFlags(f *flag.FlagSet) { + cliprinter.AppendFlagWithCustomPretty(&c.printer, f, c.env, prettyPrintJWTShow) +} + +// Run executes all logic associated with a single invocation of the +// `spire-server localauthority jwt show` CLI command +func (c *jwtShowCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient util.ServerClient) error { + client := serverClient.NewLocalAuthorityClient() + resp, err := client.GetJWTAuthorityState(ctx, &localauthorityv1.GetJWTAuthorityStateRequest{}) + if err != nil { + return err + } + + return c.printer.PrintProto(resp) +} + +func prettyPrintJWTShow(env *commoncli.Env, results ...any) error { + 
r, ok := results[0].(*localauthorityv1.GetJWTAuthorityStateResponse) + if !ok { + return errors.New("internal error: cli printer; please report this bug") + } + + env.Println("Active JWT authority:") + if r.Active != nil { + env.Printf(" Authority ID: %s\n", r.Active.AuthorityId) + env.Printf(" Expires at: %s\n", time.Unix(r.Active.ExpiresAt, 0).UTC()) + } else { + env.Println(" No active JWT authority found") + } + env.Println() + env.Println("Prepared JWT authority:") + if r.Prepared != nil { + env.Printf(" Authority ID: %s\n", r.Prepared.AuthorityId) + env.Printf(" Expires at: %s\n", time.Unix(r.Prepared.ExpiresAt, 0).UTC()) + } else { + env.Println(" No prepared JWT authority found") + } + env.Println() + env.Println("Old JWT authority:") + if r.Old != nil { + env.Printf(" Authority ID: %s\n", r.Old.AuthorityId) + env.Printf(" Expires at: %s\n", time.Unix(r.Old.ExpiresAt, 0).UTC()) + } else { + env.Println(" No old JWT authority found") + } + return nil +} diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_show_posix_test.go b/cmd/spire-server/cli/localauthority/jwt/jwt_show_posix_test.go new file mode 100644 index 00000000000..685692db23e --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_show_posix_test.go @@ -0,0 +1,12 @@ +//go:build !windows + +package jwt_test + +var ( + jwtShowUsage = `Usage of localauthority jwt show: + -output value + Desired output format (pretty, json); default: pretty. 
+ -socketPath string + Path to the SPIRE Server API socket (default "/tmp/spire-server/private/api.sock") +` +) diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_show_test.go b/cmd/spire-server/cli/localauthority/jwt/jwt_show_test.go new file mode 100644 index 00000000000..c6e3e5e1142 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_show_test.go @@ -0,0 +1,133 @@ +package jwt_test + +import ( + "fmt" + "testing" + + "github.com/gogo/status" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + authority_common "github.com/spiffe/spire/cmd/spire-server/cli/authoritycommon" + "github.com/spiffe/spire/cmd/spire-server/cli/common" + "github.com/spiffe/spire/cmd/spire-server/cli/localauthority/jwt" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" +) + +func TestJWTShowHelp(t *testing.T) { + test := authority_common.SetupTest(t, jwt.NewJWTShowCommandWithEnv) + + test.Client.Help() + require.Equal(t, jwtShowUsage, test.Stderr.String()) +} + +func TestJWTShowSynopsys(t *testing.T) { + test := authority_common.SetupTest(t, jwt.NewJWTShowCommandWithEnv) + require.Equal(t, "Shows the local JWT authorities", test.Client.Synopsis()) +} + +func TestJWTShow(t *testing.T) { + for _, tt := range []struct { + name string + args []string + expectReturnCode int + expectStdoutPretty string + expectStdoutJSON string + expectStderr string + serverErr error + + active, + prepared, + old *localauthorityv1.AuthorityState + }{ + { + name: "success", + expectReturnCode: 0, + active: &localauthorityv1.AuthorityState{ + AuthorityId: "active-id", + ExpiresAt: 1001, + }, + prepared: &localauthorityv1.AuthorityState{ + AuthorityId: "prepared-id", + ExpiresAt: 1002, + }, + old: &localauthorityv1.AuthorityState{ + AuthorityId: "old-id", + ExpiresAt: 1003, + }, + expectStdoutPretty: "Active JWT authority:\n Authority ID: active-id\n Expires at: 1970-01-01 00:16:41 +0000 UTC\n\nPrepared JWT authority:\n Authority 
ID: prepared-id\n Expires at: 1970-01-01 00:16:42 +0000 UTC\n\nOld JWT authority:\n Authority ID: old-id\n Expires at: 1970-01-01 00:16:43 +0000 UTC\n", + expectStdoutJSON: `{"active":{"authority_id":"active-id","expires_at":"1001"},"prepared":{"authority_id":"prepared-id","expires_at":"1002"},"old":{"authority_id":"old-id","expires_at":"1003"}}`, + }, + { + name: "success - no active", + expectReturnCode: 0, + prepared: &localauthorityv1.AuthorityState{ + AuthorityId: "prepared-id", + ExpiresAt: 1002, + }, + old: &localauthorityv1.AuthorityState{ + AuthorityId: "old-id", + ExpiresAt: 1003, + }, + expectStdoutPretty: "Active JWT authority:\n No active JWT authority found\n\nPrepared JWT authority:\n Authority ID: prepared-id\n Expires at: 1970-01-01 00:16:42 +0000 UTC\n\nOld JWT authority:\n Authority ID: old-id\n Expires at: 1970-01-01 00:16:43 +0000 UTC\n", + expectStdoutJSON: `{"prepared":{"authority_id":"prepared-id","expires_at":"1002"},"old":{"authority_id":"old-id","expires_at":"1003"}}`, + }, + { + name: "success - no prepared", + expectReturnCode: 0, + active: &localauthorityv1.AuthorityState{ + AuthorityId: "active-id", + ExpiresAt: 1001, + }, + old: &localauthorityv1.AuthorityState{ + AuthorityId: "old-id", + ExpiresAt: 1003, + }, + expectStdoutPretty: "Active JWT authority:\n Authority ID: active-id\n Expires at: 1970-01-01 00:16:41 +0000 UTC\n\nPrepared JWT authority:\n No prepared JWT authority found\n\nOld JWT authority:\n Authority ID: old-id\n Expires at: 1970-01-01 00:16:43 +0000 UTC\n", + expectStdoutJSON: `{"active":{"authority_id":"active-id","expires_at":"1001"},"old":{"authority_id":"old-id","expires_at":"1003"}}`, + }, + { + name: "success - no old", + expectReturnCode: 0, + active: &localauthorityv1.AuthorityState{ + AuthorityId: "active-id", + ExpiresAt: 1001, + }, + prepared: &localauthorityv1.AuthorityState{ + AuthorityId: "prepared-id", + ExpiresAt: 1002, + }, + expectStdoutPretty: "Active JWT authority:\n Authority ID: active-id\n 
Expires at: 1970-01-01 00:16:41 +0000 UTC\n\nPrepared JWT authority:\n Authority ID: prepared-id\n Expires at: 1970-01-01 00:16:42 +0000 UTC\n\nOld JWT authority:\n No old JWT authority found\n", + expectStdoutJSON: `{"active":{"authority_id":"active-id","expires_at":"1001"},"prepared":{"authority_id":"prepared-id","expires_at":"1002"}}`, + }, + { + name: "wrong UDS path", + args: []string{common.AddrArg, common.AddrValue}, + expectReturnCode: 1, + expectStderr: common.AddrError, + }, + { + name: "server error", + serverErr: status.Error(codes.Internal, "internal server error"), + expectReturnCode: 1, + expectStderr: "Error: rpc error: code = Internal desc = internal server error\n", + }, + } { + for _, format := range authority_common.AvailableFormats { + t.Run(fmt.Sprintf("%s using %s format", tt.name, format), func(t *testing.T) { + test := authority_common.SetupTest(t, jwt.NewJWTShowCommandWithEnv) + test.Server.ActiveJWT = tt.active + test.Server.PreparedJWT = tt.prepared + test.Server.OldJWT = tt.old + test.Server.Err = tt.serverErr + args := tt.args + args = append(args, "-output", format) + + returnCode := test.Client.Run(append(test.Args, args...)) + + authority_common.RequireOutputBasedOnFormat(t, format, test.Stdout.String(), tt.expectStdoutPretty, tt.expectStdoutJSON) + require.Equal(t, tt.expectStderr, test.Stderr.String()) + require.Equal(t, tt.expectReturnCode, returnCode) + }) + } + } +} diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_show_windows_test.go b/cmd/spire-server/cli/localauthority/jwt/jwt_show_windows_test.go new file mode 100644 index 00000000000..9b7c48b162c --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_show_windows_test.go @@ -0,0 +1,12 @@ +//go:build windows + +package jwt_test + +var ( + jwtShowUsage = `Usage of localauthority jwt show: + -namedPipeName string + Pipe name of the SPIRE Server API named pipe (default "\\spire-server\\private\\api") + -output value + Desired output format (pretty, json); 
default: pretty. +` +) diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_taint.go b/cmd/spire-server/cli/localauthority/jwt/jwt_taint.go new file mode 100644 index 00000000000..c7e7ed0d436 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_taint.go @@ -0,0 +1,92 @@ +package jwt + +import ( + "context" + "errors" + "flag" + "fmt" + "time" + + "github.com/mitchellh/cli" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + "github.com/spiffe/spire/cmd/spire-server/util" + commoncli "github.com/spiffe/spire/pkg/common/cli" + "github.com/spiffe/spire/pkg/common/cliprinter" +) + +// NewJWTTaintCommand creates a new "jwt taint" subcommand for "localauthority" command. +func NewJWTTaintCommand() cli.Command { + return newJWTTaintCommand(commoncli.DefaultEnv) +} + +// NewJWTTaintCommandWithEnv creates a new "jwt taint" subcommand for "localauthority" command +// using the environment specified +func NewJWTTaintCommandWithEnv(env *commoncli.Env) cli.Command { + return util.AdaptCommand(env, &jwtTaintCommand{env: env}) +} + +func newJWTTaintCommand(env *commoncli.Env) cli.Command { + return util.AdaptCommand(env, &jwtTaintCommand{env: env}) +} + +type jwtTaintCommand struct { + authorityID string + printer cliprinter.Printer + env *commoncli.Env +} + +func (c *jwtTaintCommand) Name() string { + return "localauthority jwt taint" +} + +func (*jwtTaintCommand) Synopsis() string { + return "Marks the previously active JWT authority as being tainted" +} + +func (c *jwtTaintCommand) AppendFlags(f *flag.FlagSet) { + f.StringVar(&c.authorityID, "authorityID", "", "The authority ID of the JWT authority to taint") + cliprinter.AppendFlagWithCustomPretty(&c.printer, f, c.env, prettyPrintJWTTaint) +} + +// Run executes all logic associated with a single invocation of the +// `spire-server localauthority jwt taint` CLI command +func (c *jwtTaintCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient util.ServerClient) 
error { + if err := c.validate(); err != nil { + return err + } + + client := serverClient.NewLocalAuthorityClient() + resp, err := client.TaintJWTAuthority(ctx, &localauthorityv1.TaintJWTAuthorityRequest{ + AuthorityId: c.authorityID, + }) + if err != nil { + return fmt.Errorf("could not taint JWT authority: %w", err) + } + + return c.printer.PrintProto(resp) +} + +func prettyPrintJWTTaint(env *commoncli.Env, results ...any) error { + r, ok := results[0].(*localauthorityv1.TaintJWTAuthorityResponse) + if !ok { + return errors.New("internal error: cli printer; please report this bug") + } + + env.Println("Tainted JWT authority:") + if r.TaintedAuthority == nil { + return errors.New("internal error: expected to have tainted JWT authority information") + } + + env.Printf(" Authority ID: %s\n", r.TaintedAuthority.AuthorityId) + env.Printf(" Expires at: %s\n", time.Unix(r.TaintedAuthority.ExpiresAt, 0).UTC()) + + return nil +} + +func (c *jwtTaintCommand) validate() error { + if c.authorityID == "" { + return errors.New("an authority ID is required") + } + + return nil +} diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_taint_posix_test.go b/cmd/spire-server/cli/localauthority/jwt/jwt_taint_posix_test.go new file mode 100644 index 00000000000..e761cc8503f --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_taint_posix_test.go @@ -0,0 +1,14 @@ +//go:build !windows + +package jwt_test + +var ( + jwtTaintUsage = `Usage of localauthority jwt taint: + -authorityID string + The authority ID of the JWT authority to taint + -output value + Desired output format (pretty, json); default: pretty. 
+ -socketPath string + Path to the SPIRE Server API socket (default "/tmp/spire-server/private/api.sock") +` +) diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_taint_test.go b/cmd/spire-server/cli/localauthority/jwt/jwt_taint_test.go new file mode 100644 index 00000000000..6108270e059 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_taint_test.go @@ -0,0 +1,85 @@ +package jwt_test + +import ( + "fmt" + "testing" + + "github.com/gogo/status" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + authority_common "github.com/spiffe/spire/cmd/spire-server/cli/authoritycommon" + "github.com/spiffe/spire/cmd/spire-server/cli/common" + "github.com/spiffe/spire/cmd/spire-server/cli/localauthority/jwt" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" +) + +func TestJWTTaintHelp(t *testing.T) { + test := authority_common.SetupTest(t, jwt.NewJWTTaintCommandWithEnv) + + test.Client.Help() + require.Equal(t, jwtTaintUsage, test.Stderr.String()) +} + +func TestJWTTaintSynopsys(t *testing.T) { + test := authority_common.SetupTest(t, jwt.NewJWTTaintCommandWithEnv) + require.Equal(t, "Marks the previously active JWT authority as being tainted", test.Client.Synopsis()) +} + +func TestJWTTaint(t *testing.T) { + for _, tt := range []struct { + name string + args []string + expectReturnCode int + expectStdoutPretty string + expectStdoutJSON string + expectStderr string + serverErr error + tainted *localauthorityv1.AuthorityState + }{ + { + name: "success", + expectReturnCode: 0, + args: []string{"-authorityID", "prepared-id"}, + tainted: &localauthorityv1.AuthorityState{ + AuthorityId: "tainted-id", + ExpiresAt: 1001, + }, + expectStdoutPretty: "Tainted JWT authority:\n Authority ID: tainted-id\n Expires at: 1970-01-01 00:16:41 +0000 UTC\n", + expectStdoutJSON: `{"tainted_authority":{"authority_id":"tainted-id","expires_at":"1001"}}`, + }, + { + name: "no authority id", + expectReturnCode: 1, + 
expectStderr: "Error: an authority ID is required\n", + }, + { + name: "wrong UDS path", + args: []string{common.AddrArg, common.AddrValue}, + expectReturnCode: 1, + expectStderr: common.AddrError, + }, + { + name: "server error", + args: []string{"-authorityID", "old-id"}, + serverErr: status.Error(codes.Internal, "internal server error"), + expectReturnCode: 1, + expectStderr: "Error: could not taint JWT authority: rpc error: code = Internal desc = internal server error\n", + }, + } { + for _, format := range authority_common.AvailableFormats { + t.Run(fmt.Sprintf("%s using %s format", tt.name, format), func(t *testing.T) { + test := authority_common.SetupTest(t, jwt.NewJWTTaintCommandWithEnv) + test.Server.TaintedJWT = tt.tainted + test.Server.Err = tt.serverErr + args := tt.args + args = append(args, "-output", format) + + returnCode := test.Client.Run(append(test.Args, args...)) + + authority_common.RequireOutputBasedOnFormat(t, format, test.Stdout.String(), tt.expectStdoutPretty, tt.expectStdoutJSON) + require.Equal(t, tt.expectStderr, test.Stderr.String()) + require.Equal(t, tt.expectReturnCode, returnCode) + }) + } + } +} diff --git a/cmd/spire-server/cli/localauthority/jwt/jwt_taint_windows_test.go b/cmd/spire-server/cli/localauthority/jwt/jwt_taint_windows_test.go new file mode 100644 index 00000000000..17929f1a6b7 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/jwt/jwt_taint_windows_test.go @@ -0,0 +1,14 @@ +//go:build windows + +package jwt_test + +var ( + jwtTaintUsage = `Usage of localauthority jwt taint: + -authorityID string + The authority ID of the JWT authority to taint + -namedPipeName string + Pipe name of the SPIRE Server API named pipe (default "\\spire-server\\private\\api") + -output value + Desired output format (pretty, json); default: pretty. 
+` +) diff --git a/cmd/spire-server/cli/localauthority/x509/x509_activate.go b/cmd/spire-server/cli/localauthority/x509/x509_activate.go new file mode 100644 index 00000000000..118c2e409b8 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_activate.go @@ -0,0 +1,88 @@ +package x509 + +import ( + "context" + "errors" + "flag" + "fmt" + "time" + + "github.com/mitchellh/cli" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + "github.com/spiffe/spire/cmd/spire-server/util" + commoncli "github.com/spiffe/spire/pkg/common/cli" + "github.com/spiffe/spire/pkg/common/cliprinter" +) + +// NewX509ActivateCommand creates a new "x509 activate" subcommand for "localauthority" command. +func NewX509ActivateCommand() cli.Command { + return NewX509ActivateCommandWithEnv(commoncli.DefaultEnv) +} + +// NewX509ActivateCommandWithEnv creates a new "x509 activate" subcommand for "localauthority" command +// using the environment specified +func NewX509ActivateCommandWithEnv(env *commoncli.Env) cli.Command { + return util.AdaptCommand(env, &x509ActivateCommand{env: env}) +} + +type x509ActivateCommand struct { + authorityID string + printer cliprinter.Printer + env *commoncli.Env +} + +func (c *x509ActivateCommand) Name() string { + return "localauthority x509 activate" +} + +func (*x509ActivateCommand) Synopsis() string { + return "Activates a prepared X.509 authority for use, which will cause it to be used for all X.509 signing operations serviced by this server going forward" +} + +func (c *x509ActivateCommand) AppendFlags(f *flag.FlagSet) { + f.StringVar(&c.authorityID, "authorityID", "", "The authority ID of the X.509 authority to activate") + cliprinter.AppendFlagWithCustomPretty(&c.printer, f, c.env, prettyPrintX509Activate) +} + +// Run executes all logic associated with a single invocation of the +// `spire-server localauthority x509 activate` CLI command +func (c *x509ActivateCommand) Run(ctx context.Context, _ 
*commoncli.Env, serverClient util.ServerClient) error { + if err := c.validate(); err != nil { + return err + } + + client := serverClient.NewLocalAuthorityClient() + resp, err := client.ActivateX509Authority(ctx, &localauthorityv1.ActivateX509AuthorityRequest{ + AuthorityId: c.authorityID, + }) + if err != nil { + return fmt.Errorf("could not activate X.509 authority: %w", err) + } + + return c.printer.PrintProto(resp) +} + +func (c *x509ActivateCommand) validate() error { + if c.authorityID == "" { + return errors.New("an authority ID is required") + } + + return nil +} + +func prettyPrintX509Activate(env *commoncli.Env, results ...any) error { + r, ok := results[0].(*localauthorityv1.ActivateX509AuthorityResponse) + if !ok { + return errors.New("internal error: cli printer; please report this bug") + } + + env.Println("Activated X.509 authority:") + if r.ActivatedAuthority == nil { + return errors.New("internal error: expected to have activated X.509 authority information") + } + + env.Printf(" Authority ID: %s\n", r.ActivatedAuthority.AuthorityId) + env.Printf(" Expires at: %s\n", time.Unix(r.ActivatedAuthority.ExpiresAt, 0).UTC()) + + return nil +} diff --git a/cmd/spire-server/cli/localauthority/x509/x509_activate_posix_test.go b/cmd/spire-server/cli/localauthority/x509/x509_activate_posix_test.go new file mode 100644 index 00000000000..1ef51934393 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_activate_posix_test.go @@ -0,0 +1,14 @@ +//go:build !windows + +package x509_test + +var ( + x509ActivateUsage = `Usage of localauthority x509 activate: + -authorityID string + The authority ID of the X.509 authority to activate + -output value + Desired output format (pretty, json); default: pretty. 
+ -socketPath string + Path to the SPIRE Server API socket (default "/tmp/spire-server/private/api.sock") +` +) diff --git a/cmd/spire-server/cli/localauthority/x509/x509_activate_test.go b/cmd/spire-server/cli/localauthority/x509/x509_activate_test.go new file mode 100644 index 00000000000..97e835fef78 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_activate_test.go @@ -0,0 +1,89 @@ +package x509_test + +import ( + "fmt" + "testing" + + "github.com/gogo/status" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + authority_common "github.com/spiffe/spire/cmd/spire-server/cli/authoritycommon" + "github.com/spiffe/spire/cmd/spire-server/cli/common" + "github.com/spiffe/spire/cmd/spire-server/cli/localauthority/x509" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" +) + +func TestX509ActivateHelp(t *testing.T) { + test := authority_common.SetupTest(t, x509.NewX509ActivateCommandWithEnv) + + test.Client.Help() + require.Equal(t, x509ActivateUsage, test.Stderr.String()) +} + +func TestX509ActivateSynopsys(t *testing.T) { + test := authority_common.SetupTest(t, x509.NewX509ActivateCommandWithEnv) + require.Equal(t, "Activates a prepared X.509 authority for use, which will cause it to be used for all X.509 signing operations serviced by this server going forward", test.Client.Synopsis()) +} + +func TestX509Activate(t *testing.T) { + for _, tt := range []struct { + name string + args []string + expectReturnCode int + expectStdoutPretty string + expectStdoutJSON string + expectStderr string + serverErr error + active, prepared *localauthorityv1.AuthorityState + }{ + { + name: "success", + expectReturnCode: 0, + args: []string{"-authorityID", "prepared-id"}, + active: &localauthorityv1.AuthorityState{ + AuthorityId: "active-id", + ExpiresAt: 1001, + }, + prepared: &localauthorityv1.AuthorityState{ + AuthorityId: "prepared-id", + ExpiresAt: 1002, + }, + expectStdoutPretty: "Activated X.509 
authority:\n Authority ID: active-id\n Expires at: 1970-01-01 00:16:41 +0000 UTC\n", + expectStdoutJSON: `{"activated_authority":{"authority_id":"active-id","expires_at":"1001"}}`, + }, + { + name: "no authority id", + expectReturnCode: 1, + expectStderr: "Error: an authority ID is required\n", + }, + { + name: "wrong UDS path", + args: []string{common.AddrArg, common.AddrValue}, + expectReturnCode: 1, + expectStderr: common.AddrError, + }, + { + name: "server error", + args: []string{"-authorityID", "prepared-id"}, + serverErr: status.Error(codes.Internal, "internal server error"), + expectReturnCode: 1, + expectStderr: "Error: could not activate X.509 authority: rpc error: code = Internal desc = internal server error\n", + }, + } { + for _, format := range authority_common.AvailableFormats { + t.Run(fmt.Sprintf("%s using %s format", tt.name, format), func(t *testing.T) { + test := authority_common.SetupTest(t, x509.NewX509ActivateCommandWithEnv) + test.Server.ActiveX509 = tt.active + test.Server.Err = tt.serverErr + args := tt.args + args = append(args, "-output", format) + + returnCode := test.Client.Run(append(test.Args, args...)) + + authority_common.RequireOutputBasedOnFormat(t, format, test.Stdout.String(), tt.expectStdoutPretty, tt.expectStdoutJSON) + require.Equal(t, tt.expectStderr, test.Stderr.String()) + require.Equal(t, tt.expectReturnCode, returnCode) + }) + } + } +} diff --git a/cmd/spire-server/cli/localauthority/x509/x509_activate_windows_test.go b/cmd/spire-server/cli/localauthority/x509/x509_activate_windows_test.go new file mode 100644 index 00000000000..b011473b71a --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_activate_windows_test.go @@ -0,0 +1,14 @@ +//go:build windows + +package x509_test + +var ( + x509ActivateUsage = `Usage of localauthority x509 activate: + -authorityID string + The authority ID of the X.509 authority to activate + -namedPipeName string + Pipe name of the SPIRE Server API named pipe (default 
"\\spire-server\\private\\api") + -output value + Desired output format (pretty, json); default: pretty. +` +) diff --git a/cmd/spire-server/cli/localauthority/x509/x509_prepare.go b/cmd/spire-server/cli/localauthority/x509/x509_prepare.go new file mode 100644 index 00000000000..7553c1f0909 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_prepare.go @@ -0,0 +1,72 @@ +package x509 + +import ( + "context" + "errors" + "flag" + "fmt" + "time" + + "github.com/mitchellh/cli" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + "github.com/spiffe/spire/cmd/spire-server/util" + commoncli "github.com/spiffe/spire/pkg/common/cli" + "github.com/spiffe/spire/pkg/common/cliprinter" +) + +// NewX509PrepareCommand creates a new "x509 prepare" subcommand for "localauthority" command. +func NewX509PrepareCommand() cli.Command { + return NewX509PrepareCommandWithEnv(commoncli.DefaultEnv) +} + +// NewX509PrepareCommandWithEnv creates a new "x509 prepare" subcommand for "localauthority" command +// using the environment specified +func NewX509PrepareCommandWithEnv(env *commoncli.Env) cli.Command { + return util.AdaptCommand(env, &x509PrepareCommand{env: env}) +} + +type x509PrepareCommand struct { + printer cliprinter.Printer + env *commoncli.Env +} + +func (c *x509PrepareCommand) Name() string { + return "localauthority x509 prepare" +} + +func (*x509PrepareCommand) Synopsis() string { + return "Prepares a new X.509 authority for use by generating a new key and injecting the resulting CA certificate into the bundle" +} + +func (c *x509PrepareCommand) AppendFlags(f *flag.FlagSet) { + cliprinter.AppendFlagWithCustomPretty(&c.printer, f, c.env, prettyPrintX509Prepare) +} + +// Run executes all logic associated with a single invocation of the +// `spire-server localauthority x509 prepare` CLI command +func (c *x509PrepareCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient util.ServerClient) error { + client := 
serverClient.NewLocalAuthorityClient() + resp, err := client.PrepareX509Authority(ctx, &localauthorityv1.PrepareX509AuthorityRequest{}) + if err != nil { + return fmt.Errorf("could not prepare X.509 authority: %w", err) + } + + return c.printer.PrintProto(resp) +} + +func prettyPrintX509Prepare(env *commoncli.Env, results ...any) error { + r, ok := results[0].(*localauthorityv1.PrepareX509AuthorityResponse) + if !ok { + return errors.New("internal error: cli printer; please report this bug") + } + + env.Println("Prepared X.509 authority:") + if r.PreparedAuthority == nil { + return errors.New("internal error: expected to have prepared X.509 authority information") + } + + env.Printf(" Authority ID: %s\n", r.PreparedAuthority.AuthorityId) + env.Printf(" Expires at: %s\n", time.Unix(r.PreparedAuthority.ExpiresAt, 0).UTC()) + + return nil +} diff --git a/cmd/spire-server/cli/localauthority/x509/x509_prepare_posix_test.go b/cmd/spire-server/cli/localauthority/x509/x509_prepare_posix_test.go new file mode 100644 index 00000000000..4d06a12d842 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_prepare_posix_test.go @@ -0,0 +1,12 @@ +//go:build !windows + +package x509_test + +var ( + x509PrepareUsage = `Usage of localauthority x509 prepare: + -output value + Desired output format (pretty, json); default: pretty. 
+ -socketPath string + Path to the SPIRE Server API socket (default "/tmp/spire-server/private/api.sock") +` +) diff --git a/cmd/spire-server/cli/localauthority/x509/x509_prepare_test.go b/cmd/spire-server/cli/localauthority/x509/x509_prepare_test.go new file mode 100644 index 00000000000..f1ccc6f773e --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_prepare_test.go @@ -0,0 +1,78 @@ +package x509_test + +import ( + "fmt" + "testing" + + "github.com/gogo/status" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + authority_common "github.com/spiffe/spire/cmd/spire-server/cli/authoritycommon" + "github.com/spiffe/spire/cmd/spire-server/cli/common" + "github.com/spiffe/spire/cmd/spire-server/cli/localauthority/x509" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" +) + +func TestX509PrepareHelp(t *testing.T) { + test := authority_common.SetupTest(t, x509.NewX509PrepareCommandWithEnv) + + test.Client.Help() + require.Equal(t, x509PrepareUsage, test.Stderr.String()) +} + +func TestX509PrepareSynopsys(t *testing.T) { + test := authority_common.SetupTest(t, x509.NewX509PrepareCommandWithEnv) + require.Equal(t, "Prepares a new X.509 authority for use by generating a new key and injecting the resulting CA certificate into the bundle", test.Client.Synopsis()) +} + +func TestX509Prepare(t *testing.T) { + for _, tt := range []struct { + name string + args []string + expectReturnCode int + expectStdoutPretty string + expectStdoutJSON string + expectStderr string + serverErr error + prepared *localauthorityv1.AuthorityState + }{ + { + name: "success", + expectReturnCode: 0, + expectStdoutPretty: "Prepared X.509 authority:\n Authority ID: prepared-id\n Expires at: 1970-01-01 00:16:42 +0000 UTC\n", + expectStdoutJSON: `{"prepared_authority":{"authority_id":"prepared-id","expires_at":"1002"}}`, + prepared: &localauthorityv1.AuthorityState{ + AuthorityId: "prepared-id", + ExpiresAt: 1002, + }, + }, + 
{ + name: "wrong UDS path", + args: []string{common.AddrArg, common.AddrValue}, + expectReturnCode: 1, + expectStderr: common.AddrError, + }, + { + name: "server error", + serverErr: status.Error(codes.Internal, "internal server error"), + expectReturnCode: 1, + expectStderr: "Error: could not prepare X.509 authority: rpc error: code = Internal desc = internal server error\n", + }, + } { + for _, format := range authority_common.AvailableFormats { + t.Run(fmt.Sprintf("%s using %s format", tt.name, format), func(t *testing.T) { + test := authority_common.SetupTest(t, x509.NewX509PrepareCommandWithEnv) + test.Server.PreparedX509 = tt.prepared + test.Server.Err = tt.serverErr + args := tt.args + args = append(args, "-output", format) + + returnCode := test.Client.Run(append(test.Args, args...)) + + authority_common.RequireOutputBasedOnFormat(t, format, test.Stdout.String(), tt.expectStdoutPretty, tt.expectStdoutJSON) + require.Equal(t, tt.expectStderr, test.Stderr.String()) + require.Equal(t, tt.expectReturnCode, returnCode) + }) + } + } +} diff --git a/cmd/spire-server/cli/localauthority/x509/x509_prepare_windows_test.go b/cmd/spire-server/cli/localauthority/x509/x509_prepare_windows_test.go new file mode 100644 index 00000000000..353ee4654e8 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_prepare_windows_test.go @@ -0,0 +1,12 @@ +//go:build windows + +package x509_test + +var ( + x509PrepareUsage = `Usage of localauthority x509 prepare: + -namedPipeName string + Pipe name of the SPIRE Server API named pipe (default "\\spire-server\\private\\api") + -output value + Desired output format (pretty, json); default: pretty. 
+` +) diff --git a/cmd/spire-server/cli/localauthority/x509/x509_revoke.go b/cmd/spire-server/cli/localauthority/x509/x509_revoke.go new file mode 100644 index 00000000000..ac42c35da8f --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_revoke.go @@ -0,0 +1,88 @@ +package x509 + +import ( + "context" + "errors" + "flag" + "fmt" + "time" + + "github.com/mitchellh/cli" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + "github.com/spiffe/spire/cmd/spire-server/util" + commoncli "github.com/spiffe/spire/pkg/common/cli" + "github.com/spiffe/spire/pkg/common/cliprinter" +) + +// NewX509ActivateCommand creates a new "x509 revoke" subcommand for "localauthority" command. +func NewX509RevokeCommand() cli.Command { + return NewX509RevokeCommandWithEnv(commoncli.DefaultEnv) +} + +// NewX509ActivateCommandWithEnv creates a new "x509 revoke" subcommand for "localauthority" command +// using the environment specified +func NewX509RevokeCommandWithEnv(env *commoncli.Env) cli.Command { + return util.AdaptCommand(env, &x509RevokeCommand{env: env}) +} + +type x509RevokeCommand struct { + authorityID string + printer cliprinter.Printer + env *commoncli.Env +} + +func (c *x509RevokeCommand) Name() string { + return "localauthority x509 revoke" +} + +func (*x509RevokeCommand) Synopsis() string { + return "Revokes the previously active X.509 authority by removing it from the bundle and propagating this update throughout the cluster" +} + +func (c *x509RevokeCommand) AppendFlags(f *flag.FlagSet) { + f.StringVar(&c.authorityID, "authorityID", "", "The authority ID of the X.509 authority to revoke") + cliprinter.AppendFlagWithCustomPretty(&c.printer, f, c.env, prettyPrintX509Revoke) +} + +// Run executes all logic associated with a single invocation of the +// `spire-server localauthority x509 revoke` CLI command +func (c *x509RevokeCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient util.ServerClient) error { + if 
err := c.validate(); err != nil { + return err + } + + client := serverClient.NewLocalAuthorityClient() + resp, err := client.RevokeX509Authority(ctx, &localauthorityv1.RevokeX509AuthorityRequest{ + AuthorityId: c.authorityID, + }) + if err != nil { + return fmt.Errorf("could not revoke X.509 authority: %w", err) + } + + return c.printer.PrintProto(resp) +} + +func (c *x509RevokeCommand) validate() error { + if c.authorityID == "" { + return errors.New("an authority ID is required") + } + + return nil +} + +func prettyPrintX509Revoke(env *commoncli.Env, results ...any) error { + r, ok := results[0].(*localauthorityv1.RevokeX509AuthorityResponse) + if !ok { + return errors.New("internal error: cli printer; please report this bug") + } + + env.Println("Revoked X.509 authority:") + if r.RevokedAuthority == nil { + return errors.New("internal error: expected to have revoked X.509 authority information") + } + + env.Printf(" Authority ID: %s\n", r.RevokedAuthority.AuthorityId) + env.Printf(" Expires at: %s\n", time.Unix(r.RevokedAuthority.ExpiresAt, 0).UTC()) + + return nil +} diff --git a/cmd/spire-server/cli/localauthority/x509/x509_revoke_posix_test.go b/cmd/spire-server/cli/localauthority/x509/x509_revoke_posix_test.go new file mode 100644 index 00000000000..782282ed787 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_revoke_posix_test.go @@ -0,0 +1,14 @@ +//go:build !windows + +package x509_test + +var ( + x509RevokeUsage = `Usage of localauthority x509 revoke: + -authorityID string + The authority ID of the X.509 authority to revoke + -output value + Desired output format (pretty, json); default: pretty. 
+ -socketPath string + Path to the SPIRE Server API socket (default "/tmp/spire-server/private/api.sock") +` +) diff --git a/cmd/spire-server/cli/localauthority/x509/x509_revoke_test.go b/cmd/spire-server/cli/localauthority/x509/x509_revoke_test.go new file mode 100644 index 00000000000..9459de54191 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_revoke_test.go @@ -0,0 +1,85 @@ +package x509_test + +import ( + "fmt" + "testing" + + "github.com/gogo/status" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + authority_common "github.com/spiffe/spire/cmd/spire-server/cli/authoritycommon" + "github.com/spiffe/spire/cmd/spire-server/cli/common" + "github.com/spiffe/spire/cmd/spire-server/cli/localauthority/x509" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" +) + +func TestX509RevokeHelp(t *testing.T) { + test := authority_common.SetupTest(t, x509.NewX509RevokeCommandWithEnv) + + test.Client.Help() + require.Equal(t, x509RevokeUsage, test.Stderr.String()) +} + +func TestX509RevokeSynopsys(t *testing.T) { + test := authority_common.SetupTest(t, x509.NewX509RevokeCommandWithEnv) + require.Equal(t, "Revokes the previously active X.509 authority by removing it from the bundle and propagating this update throughout the cluster", test.Client.Synopsis()) +} + +func TestX509Revoke(t *testing.T) { + for _, tt := range []struct { + name string + args []string + expectReturnCode int + expectStdoutPretty string + expectStdoutJSON string + expectStderr string + serverErr error + revoked *localauthorityv1.AuthorityState + }{ + { + name: "success", + expectReturnCode: 0, + args: []string{"-authorityID", "prepared-id"}, + revoked: &localauthorityv1.AuthorityState{ + AuthorityId: "revoked-id", + ExpiresAt: 1001, + }, + expectStdoutPretty: "Revoked X.509 authority:\n Authority ID: revoked-id\n Expires at: 1970-01-01 00:16:41 +0000 UTC\n", + expectStdoutJSON: 
`{"revoked_authority":{"authority_id":"revoked-id","expires_at":"1001"}}`, + }, + { + name: "no authority id", + expectReturnCode: 1, + expectStderr: "Error: an authority ID is required\n", + }, + { + name: "wrong UDS path", + args: []string{common.AddrArg, common.AddrValue}, + expectReturnCode: 1, + expectStderr: common.AddrError, + }, + { + name: "server error", + args: []string{"-authorityID", "tainted-id"}, + serverErr: status.Error(codes.Internal, "internal server error"), + expectReturnCode: 1, + expectStderr: "Error: could not revoke X.509 authority: rpc error: code = Internal desc = internal server error\n", + }, + } { + for _, format := range authority_common.AvailableFormats { + t.Run(fmt.Sprintf("%s using %s format", tt.name, format), func(t *testing.T) { + test := authority_common.SetupTest(t, x509.NewX509RevokeCommandWithEnv) + test.Server.RevokedX509 = tt.revoked + test.Server.Err = tt.serverErr + args := tt.args + args = append(args, "-output", format) + + returnCode := test.Client.Run(append(test.Args, args...)) + + authority_common.RequireOutputBasedOnFormat(t, format, test.Stdout.String(), tt.expectStdoutPretty, tt.expectStdoutJSON) + require.Equal(t, tt.expectStderr, test.Stderr.String()) + require.Equal(t, tt.expectReturnCode, returnCode) + }) + } + } +} diff --git a/cmd/spire-server/cli/localauthority/x509/x509_revoke_windows_test.go b/cmd/spire-server/cli/localauthority/x509/x509_revoke_windows_test.go new file mode 100644 index 00000000000..85896e92afb --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_revoke_windows_test.go @@ -0,0 +1,14 @@ +//go:build windows + +package x509_test + +var ( + x509RevokeUsage = `Usage of localauthority x509 revoke: + -authorityID string + The authority ID of the X.509 authority to revoke + -namedPipeName string + Pipe name of the SPIRE Server API named pipe (default "\\spire-server\\private\\api") + -output value + Desired output format (pretty, json); default: pretty. 
+` +) diff --git a/cmd/spire-server/cli/localauthority/x509/x509_show.go b/cmd/spire-server/cli/localauthority/x509/x509_show.go new file mode 100644 index 00000000000..fbe6776401d --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_show.go @@ -0,0 +1,88 @@ +package x509 + +import ( + "context" + "errors" + "flag" + "fmt" + "time" + + "github.com/mitchellh/cli" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + "github.com/spiffe/spire/cmd/spire-server/util" + commoncli "github.com/spiffe/spire/pkg/common/cli" + "github.com/spiffe/spire/pkg/common/cliprinter" +) + +// NewShowCommand creates a new "x509 show" subcommand for "localauthority" command. +func NewX509ShowCommand() cli.Command { + return NewX509ShowCommandWithEnv(commoncli.DefaultEnv) +} + +// NewX509ShowCommandWithEnv creates a new "x509 show" subcommand for "localauthority" command +// using the environment specified +func NewX509ShowCommandWithEnv(env *commoncli.Env) cli.Command { + return util.AdaptCommand(env, &x509ShowCommand{env: env}) +} + +type x509ShowCommand struct { + printer cliprinter.Printer + + env *commoncli.Env +} + +func (c *x509ShowCommand) Name() string { + return "localauthority x509 show" +} + +func (*x509ShowCommand) Synopsis() string { + return "Shows the local X.509 authorities" +} + +func (c *x509ShowCommand) AppendFlags(f *flag.FlagSet) { + cliprinter.AppendFlagWithCustomPretty(&c.printer, f, c.env, prettyPrintX509Show) +} + +// Run executes all logic associated with a single invocation of the +// `spire-server localauthority x509 show` CLI command +func (c *x509ShowCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient util.ServerClient) error { + client := serverClient.NewLocalAuthorityClient() + resp, err := client.GetX509AuthorityState(ctx, &localauthorityv1.GetX509AuthorityStateRequest{}) + if err != nil { + return fmt.Errorf("could not get X.509 authorities: %w", err) + } + + return 
c.printer.PrintProto(resp) +} + +func prettyPrintX509Show(env *commoncli.Env, results ...any) error { + r, ok := results[0].(*localauthorityv1.GetX509AuthorityStateResponse) + if !ok { + return errors.New("internal error: cli printer; please report this bug") + } + + env.Println("Active X.509 authority:") + if r.Active != nil { + env.Printf(" Authority ID: %s\n", r.Active.AuthorityId) + env.Printf(" Expires at: %s\n", time.Unix(r.Active.ExpiresAt, 0).UTC()) + } else { + env.Println(" No active X.509 authority found") + } + env.Println() + env.Println("Prepared X.509 authority:") + if r.Prepared != nil { + env.Printf(" Authority ID: %s\n", r.Prepared.AuthorityId) + env.Printf(" Expires at: %s\n", time.Unix(r.Prepared.ExpiresAt, 0).UTC()) + } else { + env.Println(" No prepared X.509 authority found") + } + env.Println() + env.Println("Old X.509 authority:") + if r.Old != nil { + env.Printf(" Authority ID: %s\n", r.Old.AuthorityId) + env.Printf(" Expires at: %s\n", time.Unix(r.Old.ExpiresAt, 0).UTC()) + } else { + env.Println(" No old X.509 authority found") + } + return nil +} diff --git a/cmd/spire-server/cli/localauthority/x509/x509_show_posix_test.go b/cmd/spire-server/cli/localauthority/x509/x509_show_posix_test.go new file mode 100644 index 00000000000..ef308506431 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_show_posix_test.go @@ -0,0 +1,12 @@ +//go:build !windows + +package x509_test + +var ( + x509ShowUsage = `Usage of localauthority x509 show: + -output value + Desired output format (pretty, json); default: pretty. 
+ -socketPath string + Path to the SPIRE Server API socket (default "/tmp/spire-server/private/api.sock") +` +) diff --git a/cmd/spire-server/cli/localauthority/x509/x509_show_test.go b/cmd/spire-server/cli/localauthority/x509/x509_show_test.go new file mode 100644 index 00000000000..8acfcbf8e37 --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_show_test.go @@ -0,0 +1,133 @@ +package x509_test + +import ( + "fmt" + "testing" + + "github.com/gogo/status" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + authority_common "github.com/spiffe/spire/cmd/spire-server/cli/authoritycommon" + "github.com/spiffe/spire/cmd/spire-server/cli/common" + "github.com/spiffe/spire/cmd/spire-server/cli/localauthority/x509" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" +) + +func TestX509ShowHelp(t *testing.T) { + test := authority_common.SetupTest(t, x509.NewX509ShowCommandWithEnv) + + test.Client.Help() + require.Equal(t, x509ShowUsage, test.Stderr.String()) +} + +func TestX509ShowSynopsys(t *testing.T) { + test := authority_common.SetupTest(t, x509.NewX509ShowCommandWithEnv) + require.Equal(t, "Shows the local X.509 authorities", test.Client.Synopsis()) +} + +func TestX509Show(t *testing.T) { + for _, tt := range []struct { + name string + args []string + expectReturnCode int + expectStdoutPretty string + expectStdoutJSON string + expectStderr string + serverErr error + + active, + prepared, + old *localauthorityv1.AuthorityState + }{ + { + name: "success", + expectReturnCode: 0, + active: &localauthorityv1.AuthorityState{ + AuthorityId: "active-id", + ExpiresAt: 1001, + }, + prepared: &localauthorityv1.AuthorityState{ + AuthorityId: "prepared-id", + ExpiresAt: 1002, + }, + old: &localauthorityv1.AuthorityState{ + AuthorityId: "old-id", + ExpiresAt: 1003, + }, + expectStdoutPretty: "Active X.509 authority:\n Authority ID: active-id\n Expires at: 1970-01-01 00:16:41 +0000 UTC\n\nPrepared X.509 
authority:\n Authority ID: prepared-id\n Expires at: 1970-01-01 00:16:42 +0000 UTC\n\nOld X.509 authority:\n Authority ID: old-id\n Expires at: 1970-01-01 00:16:43 +0000 UTC\n", + expectStdoutJSON: `{"active":{"authority_id":"active-id","expires_at":"1001"},"prepared":{"authority_id":"prepared-id","expires_at":"1002"},"old":{"authority_id":"old-id","expires_at":"1003"}}`, + }, + { + name: "success - no active", + expectReturnCode: 0, + prepared: &localauthorityv1.AuthorityState{ + AuthorityId: "prepared-id", + ExpiresAt: 1002, + }, + old: &localauthorityv1.AuthorityState{ + AuthorityId: "old-id", + ExpiresAt: 1003, + }, + expectStdoutPretty: "Active X.509 authority:\n No active X.509 authority found\n\nPrepared X.509 authority:\n Authority ID: prepared-id\n Expires at: 1970-01-01 00:16:42 +0000 UTC\n\nOld X.509 authority:\n Authority ID: old-id\n Expires at: 1970-01-01 00:16:43 +0000 UTC\n", + expectStdoutJSON: `{"prepared":{"authority_id":"prepared-id","expires_at":"1002"},"old":{"authority_id":"old-id","expires_at":"1003"}}`, + }, + { + name: "success - no prepared", + expectReturnCode: 0, + active: &localauthorityv1.AuthorityState{ + AuthorityId: "active-id", + ExpiresAt: 1001, + }, + old: &localauthorityv1.AuthorityState{ + AuthorityId: "old-id", + ExpiresAt: 1003, + }, + expectStdoutPretty: "Active X.509 authority:\n Authority ID: active-id\n Expires at: 1970-01-01 00:16:41 +0000 UTC\n\nPrepared X.509 authority:\n No prepared X.509 authority found\n\nOld X.509 authority:\n Authority ID: old-id\n Expires at: 1970-01-01 00:16:43 +0000 UTC\n", + expectStdoutJSON: `{"active":{"authority_id":"active-id","expires_at":"1001"},"old":{"authority_id":"old-id","expires_at":"1003"}}`, + }, + { + name: "success - no old", + expectReturnCode: 0, + active: &localauthorityv1.AuthorityState{ + AuthorityId: "active-id", + ExpiresAt: 1001, + }, + prepared: &localauthorityv1.AuthorityState{ + AuthorityId: "prepared-id", + ExpiresAt: 1002, + }, + expectStdoutPretty: "Active X.509 
authority:\n Authority ID: active-id\n Expires at: 1970-01-01 00:16:41 +0000 UTC\n\nPrepared X.509 authority:\n Authority ID: prepared-id\n Expires at: 1970-01-01 00:16:42 +0000 UTC\n\nOld X.509 authority:\n No old X.509 authority found\n", + expectStdoutJSON: `{"active":{"authority_id":"active-id","expires_at":"1001"},"prepared":{"authority_id":"prepared-id","expires_at":"1002"}}`, + }, + { + name: "wrong UDS path", + args: []string{common.AddrArg, common.AddrValue}, + expectReturnCode: 1, + expectStderr: common.AddrError, + }, + { + name: "server error", + serverErr: status.Error(codes.Internal, "internal server error"), + expectReturnCode: 1, + expectStderr: "Error: could not get X.509 authorities: rpc error: code = Internal desc = internal server error\n", + }, + } { + for _, format := range authority_common.AvailableFormats { + t.Run(fmt.Sprintf("%s using %s format", tt.name, format), func(t *testing.T) { + test := authority_common.SetupTest(t, x509.NewX509ShowCommandWithEnv) + test.Server.ActiveX509 = tt.active + test.Server.PreparedX509 = tt.prepared + test.Server.OldX509 = tt.old + test.Server.Err = tt.serverErr + args := tt.args + args = append(args, "-output", format) + + returnCode := test.Client.Run(append(test.Args, args...)) + + authority_common.RequireOutputBasedOnFormat(t, format, test.Stdout.String(), tt.expectStdoutPretty, tt.expectStdoutJSON) + require.Equal(t, tt.expectStderr, test.Stderr.String()) + require.Equal(t, tt.expectReturnCode, returnCode) + }) + } + } +} diff --git a/cmd/spire-server/cli/localauthority/x509/x509_show_windows_test.go b/cmd/spire-server/cli/localauthority/x509/x509_show_windows_test.go new file mode 100644 index 00000000000..a00cbc9694c --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_show_windows_test.go @@ -0,0 +1,12 @@ +//go:build windows + +package x509_test + +var ( + x509ShowUsage = `Usage of localauthority x509 show: + -namedPipeName string + Pipe name of the SPIRE Server API named pipe (default 
"\\spire-server\\private\\api") + -output value + Desired output format (pretty, json); default: pretty. +` +) diff --git a/cmd/spire-server/cli/localauthority/x509/x509_taint.go b/cmd/spire-server/cli/localauthority/x509/x509_taint.go new file mode 100644 index 00000000000..f0c88cbde2e --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_taint.go @@ -0,0 +1,92 @@ +package x509 + +import ( + "context" + "errors" + "flag" + "fmt" + "time" + + "github.com/mitchellh/cli" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + "github.com/spiffe/spire/cmd/spire-server/util" + commoncli "github.com/spiffe/spire/pkg/common/cli" + "github.com/spiffe/spire/pkg/common/cliprinter" +) + +// NewX509TaintCommand creates a new "x509 taint" subcommand for "localauthority" command. +func NewX509TaintCommand() cli.Command { + return newX509TaintCommand(commoncli.DefaultEnv) +} + +// NewX509TaintCommandWithEnv creates a new "x509 taint" subcommand for "localauthority" command +// using the environment specified +func NewX509TaintCommandWithEnv(env *commoncli.Env) cli.Command { + return util.AdaptCommand(env, &x509TaintCommand{env: env}) +} + +func newX509TaintCommand(env *commoncli.Env) cli.Command { + return util.AdaptCommand(env, &x509TaintCommand{env: env}) +} + +type x509TaintCommand struct { + authorityID string + printer cliprinter.Printer + env *commoncli.Env +} + +func (c *x509TaintCommand) Name() string { + return "localauthority x509 taint" +} + +func (*x509TaintCommand) Synopsis() string { + return "Marks the previously active X.509 authority as being tainted" +} + +func (c *x509TaintCommand) AppendFlags(f *flag.FlagSet) { + f.StringVar(&c.authorityID, "authorityID", "", "The authority ID of the X.509 authority to taint") + cliprinter.AppendFlagWithCustomPretty(&c.printer, f, c.env, prettyPrintX509Taint) +} + +// Run executes all logic associated with a single invocation of the +// `spire-server localauthority x509 
taint` CLI command +func (c *x509TaintCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient util.ServerClient) error { + if err := c.validate(); err != nil { + return err + } + + client := serverClient.NewLocalAuthorityClient() + resp, err := client.TaintX509Authority(ctx, &localauthorityv1.TaintX509AuthorityRequest{ + AuthorityId: c.authorityID, + }) + if err != nil { + return fmt.Errorf("could not taint X.509 authority: %w", err) + } + + return c.printer.PrintProto(resp) +} + +func prettyPrintX509Taint(env *commoncli.Env, results ...any) error { + r, ok := results[0].(*localauthorityv1.TaintX509AuthorityResponse) + if !ok { + return errors.New("internal error: cli printer; please report this bug") + } + + env.Println("Tainted X.509 authority:") + if r.TaintedAuthority == nil { + return errors.New("internal error: expected to have tainted X.509 authority information") + } + + env.Printf(" Authority ID: %s\n", r.TaintedAuthority.AuthorityId) + env.Printf(" Expires at: %s\n", time.Unix(r.TaintedAuthority.ExpiresAt, 0).UTC()) + + return nil +} + +func (c *x509TaintCommand) validate() error { + if c.authorityID == "" { + return errors.New("an authority ID is required") + } + + return nil +} diff --git a/cmd/spire-server/cli/localauthority/x509/x509_taint_posix_test.go b/cmd/spire-server/cli/localauthority/x509/x509_taint_posix_test.go new file mode 100644 index 00000000000..5ff7043ab9d --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_taint_posix_test.go @@ -0,0 +1,14 @@ +//go:build !windows + +package x509_test + +var ( + x509TaintUsage = `Usage of localauthority x509 taint: + -authorityID string + The authority ID of the X.509 authority to taint + -output value + Desired output format (pretty, json); default: pretty. 
+ -socketPath string + Path to the SPIRE Server API socket (default "/tmp/spire-server/private/api.sock") +` +) diff --git a/cmd/spire-server/cli/localauthority/x509/x509_taint_test.go b/cmd/spire-server/cli/localauthority/x509/x509_taint_test.go new file mode 100644 index 00000000000..db3f0f13afc --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_taint_test.go @@ -0,0 +1,85 @@ +package x509_test + +import ( + "fmt" + "testing" + + "github.com/gogo/status" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" + authority_common "github.com/spiffe/spire/cmd/spire-server/cli/authoritycommon" + "github.com/spiffe/spire/cmd/spire-server/cli/common" + "github.com/spiffe/spire/cmd/spire-server/cli/localauthority/x509" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" +) + +func TestX509TaintHelp(t *testing.T) { + test := authority_common.SetupTest(t, x509.NewX509TaintCommandWithEnv) + + test.Client.Help() + require.Equal(t, x509TaintUsage, test.Stderr.String()) +} + +func TestX509TaintSynopsys(t *testing.T) { + test := authority_common.SetupTest(t, x509.NewX509TaintCommandWithEnv) + require.Equal(t, "Marks the previously active X.509 authority as being tainted", test.Client.Synopsis()) +} + +func TestX509Taint(t *testing.T) { + for _, tt := range []struct { + name string + args []string + expectReturnCode int + expectStdoutPretty string + expectStdoutJSON string + expectStderr string + serverErr error + tainted *localauthorityv1.AuthorityState + }{ + { + name: "success", + expectReturnCode: 0, + args: []string{"-authorityID", "prepared-id"}, + tainted: &localauthorityv1.AuthorityState{ + AuthorityId: "tainted-id", + ExpiresAt: 1001, + }, + expectStdoutPretty: "Tainted X.509 authority:\n Authority ID: tainted-id\n Expires at: 1970-01-01 00:16:41 +0000 UTC\n", + expectStdoutJSON: `{"tainted_authority":{"authority_id":"tainted-id","expires_at":"1001"}}`, + }, + { + name: "no authority id", + 
expectReturnCode: 1, + expectStderr: "Error: an authority ID is required\n", + }, + { + name: "wrong UDS path", + args: []string{common.AddrArg, common.AddrValue}, + expectReturnCode: 1, + expectStderr: common.AddrError, + }, + { + name: "server error", + args: []string{"-authorityID", "old-id"}, + serverErr: status.Error(codes.Internal, "internal server error"), + expectReturnCode: 1, + expectStderr: "Error: could not taint X.509 authority: rpc error: code = Internal desc = internal server error\n", + }, + } { + for _, format := range authority_common.AvailableFormats { + t.Run(fmt.Sprintf("%s using %s format", tt.name, format), func(t *testing.T) { + test := authority_common.SetupTest(t, x509.NewX509TaintCommandWithEnv) + test.Server.TaintedX509 = tt.tainted + test.Server.Err = tt.serverErr + args := tt.args + args = append(args, "-output", format) + + returnCode := test.Client.Run(append(test.Args, args...)) + + authority_common.RequireOutputBasedOnFormat(t, format, test.Stdout.String(), tt.expectStdoutPretty, tt.expectStdoutJSON) + require.Equal(t, tt.expectStderr, test.Stderr.String()) + require.Equal(t, tt.expectReturnCode, returnCode) + }) + } + } +} diff --git a/cmd/spire-server/cli/localauthority/x509/x509_taint_windows_test.go b/cmd/spire-server/cli/localauthority/x509/x509_taint_windows_test.go new file mode 100644 index 00000000000..4f91931f02d --- /dev/null +++ b/cmd/spire-server/cli/localauthority/x509/x509_taint_windows_test.go @@ -0,0 +1,14 @@ +//go:build windows + +package x509_test + +var ( + x509TaintUsage = `Usage of localauthority x509 taint: + -authorityID string + The authority ID of the X.509 authority to taint + -namedPipeName string + Pipe name of the SPIRE Server API named pipe (default "\\spire-server\\private\\api") + -output value + Desired output format (pretty, json); default: pretty. 
+` +) diff --git a/cmd/spire-server/util/util.go b/cmd/spire-server/util/util.go index 099a4f8cde1..22b48c7ac98 100644 --- a/cmd/spire-server/util/util.go +++ b/cmd/spire-server/util/util.go @@ -14,6 +14,7 @@ import ( agentv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/agent/v1" bundlev1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/bundle/v1" entryv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" loggerv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/logger/v1" svidv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/svid/v1" trustdomainv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/trustdomain/v1" @@ -49,6 +50,7 @@ type ServerClient interface { NewLoggerClient() loggerv1.LoggerClient NewSVIDClient() svidv1.SVIDClient NewTrustDomainClient() trustdomainv1.TrustDomainClient + NewLocalAuthorityClient() localauthorityv1.LocalAuthorityClient NewHealthClient() grpc_health_v1.HealthClient } @@ -96,6 +98,10 @@ func (c *serverClient) NewHealthClient() grpc_health_v1.HealthClient { return grpc_health_v1.NewHealthClient(c.conn) } +func (c *serverClient) NewLocalAuthorityClient() localauthorityv1.LocalAuthorityClient { + return localauthorityv1.NewLocalAuthorityClient(c.conn) +} + // Pluralizer concatenates `singular` to `msg` when `val` is one, and // `plural` on all other occasions. It is meant to facilitate friendlier // CLI output. diff --git a/conf/agent/agent_full.conf b/conf/agent/agent_full.conf index 52c2b239c04..bed26aeacb3 100644 --- a/conf/agent/agent_full.conf +++ b/conf/agent/agent_full.conf @@ -327,13 +327,61 @@ plugins { # use_new_container_locator: If true, enables the new container # locator algorithm that has support for cgroups v2. Default: - # false. (Linux only) - # use_new_container_locator = false + # true. 
(Linux only) + # use_new_container_locator = true # verbose_container_locator_logs: If true, enables verbose logging # of mountinfo and cgroup information used to locate containers. # Defaults to false. (Linux only) # verbose_container_locator_logs = false + + # experimental: Experimental features. + experimental { + # sigstore: sigstore options. Enables image cosign signatures checking. + # sigstore { + # allowed_identities: Maps OIDC issuer URIs to acceptable SANs in Fulcio certificates for validating signatures. + # Images must be signed by certificates matching these issuer-SAN pairs to be accepted. + # Supports regular expressions patterns. + # + # If unspecified, signatures from any issuer are accepted. + # + # allowed_identities { + # "https://accounts.google.com" = [".*@example.com", "subject@otherdomain.com"] + # "https://github.com/login/oauth" = ["github.com/ci.yaml@refs/tags/*"] + # "https://.*\.example.org" = ["user@.*\.example.org"] + # } + + # skipped_images: A list of image IDs to bypass Cosign's signature verification. + # For images in this list, no sigstore selectors will be generated. + # skipped_images = ["registry/image@sha256:examplehash"] + + # rekor_url: The URL for the Rekor Transparency Log Server to use with cosign. + # Default: "https://rekor.sigstore.dev" + # rekor_url = "https://rekor.sigstore.dev" + + # ignore_tlog: specifies whether to bypass the transparency log verification. + # When set to true, selectors based on the Rekor bundle are not generated. + # Default: false + # ignore_tlog = true + + # ignore_attestations: specifies whether to bypass the image attestations verification + # When set to true: the selector "image-attestations:verified" is not generated. + # Default: false + # ignore_attestations = true + + # ignore_sct: specifies whether to bypass the Signed Certificate Timestamp (SCT) verification. + # An SCT is proof of inclusion in a Certificate Transparency log. 
+ # Default: false + # ignore_sct = true + + # RegistryCredentials maps each registry URL to its corresponding authentication credentials. + # If no credentials are provided for a specific registry, the default keychain is used for authentication. + # registry_credentials = { + # "docker.io" = { username = "user1", password = "pass1" } + # "quay.io" = { username = "user2", password = "pass2" } + # } + # } + } } } @@ -385,8 +433,8 @@ plugins { # use_new_container_locator: If true, enables the new container # locator algorithm that has support for cgroups v2. Default: - # false. (Linux only) - # use_new_container_locator = false + # true. (Linux only) + # use_new_container_locator = true # verbose_container_locator_logs: If true, enables verbose logging # of mountinfo and cgroup information used to locate containers. @@ -395,25 +443,49 @@ plugins { # experimental: Experimental features. experimental { - # sigstore: sigstore options. Enables signature checking. + # sigstore: sigstore options. Enables image cosign signatures checking. # sigstore { - # rekor_url: The URL for the rekor STL Server to use with cosign. Required. - # rekor_url = "https://rekor.sigstore.dev" + # allowed_identities: Maps OIDC issuer URIs to acceptable SANs in Fulcio certificates for validating signatures. + # Images must be signed by certificates matching these issuer-SAN pairs to be accepted. + # Supports wildcard patterns for flexible SAN specification. + # + # If unspecified, signatures from any issuer are accepted. + # + # allowed_identities { + # "https://accounts.google.com" = ["*@example.com", "subject@otherdomain.com"] + # "https://github.com/login/oauth" = ["github.com/ci.yaml@refs/tags/*"] + # } - # skip_signature_verification_image_list: List of images that should - # not be verified by cosign. They will receive a default - # sigstore-validation:passed selector, but no other sigstore related selectors. 
- # skip_signature_verification_image_list = ["sha:image1hash","sha:image2hash"] + # skipped_images: A list of image IDs to bypass Cosign's signature verification. + # For images in this list, no sigstore selectors will be generated. + # skipped_images = ["registry/image@sha256:examplehash"] - # allowed_subjects_list: Map of subjects that image signatures - # will be checked against, keyed by OIDC Provider URI. - # Signatures from subjects outside this list will be ignored. These should be email addresses. - # allowed_subjects_list { - # "https://accounts.google.com" = ["subject1@example.com","subject2@example.com"] - # } + # rekor_url: The URL for the Rekor Transparency Log Server to use with cosign. + # Default: "https://rekor.sigstore.dev" + # rekor_url = "https://rekor.sigstore.dev" - # enforce_sct: to be set as false in case of a private deployment not using the public CT - # enforce_sct = true + # ignore_tlog: specifies whether to bypass the transparency log verification. + # When set to true the selectors based on the Rekor bundle are not generated. + # Default: false + # ignore_tlog = true + + # ignore_attestations: specifies whether to bypass the image attestations verification + # When set to true: the selector "image-attestations:verified" is not generated. + # Default: false + # ignore_attestations = true + + # ignore_sct: specifies whether to bypass the Signed Certificate Timestamp (SCT) verification. + # An SCT is proof of inclusion in a Certificate Transparency log. + # Default: false + # ignore_sct = true + + # RegistryCredentials maps each registry URL to its corresponding authentication credentials. + # If no credentials are provided for a specific registry, the default keychain is used for authentication. 
+ # registry_credentials = { + # "docker.io" = { username = "user1", password = "pass1" } + # "ghcr.io" = { username = "user2", password = "pass2" } + # "quay.io" = { username = "user3", password = "pass3" } + # } # } } } diff --git a/conf/server/server_full.conf b/conf/server/server_full.conf index 7dc181d6e4e..c31f80e4238 100644 --- a/conf/server/server_full.conf +++ b/conf/server/server_full.conf @@ -904,6 +904,39 @@ plugins { # } # } + # UpstreamAuthority "ejbca": Uses a connected EJBCA to sign SPIRE server + # intermediate certificates + UpstreamAuthority "ejbca" { + plugin_data { + # The hostname of the connected EJBCA server. + hostname = "ejbca.example.com" + # (optional) The path to the CA certificate file used to validate the + # EJBCA server's certificate. Certificates must be in PEM format. + ca_cert_path = "/path/to/ca_cert.pem" + # The path to the client certificate (public key only) used + # to authenticate to EJBCA. Must be in PEM format. + client_cert_path = "/path/to/client_cert.pem" + # The path to the client key matching `client_cert` used to + # authenticate to EJBCA. Must be in PEM format. + client_cert_key_path = "/path/to/client_key.pem" + # The name of a CA in the connected EJBCA instance that will + # issue the intermediate signing certificates. + ca_name = "Fake-Sub-CA" + # The name of an end entity profile in the connected EJBCA + # instance that is configured to issue SPIFFE certificates. + end_entity_profile_name = "fakeSpireIntermediateCAEEP" + # The name of a certificate profile in the connected EJBCA instance + # that is configured to issue intermediate CA certificates. + certificate_profile_name = "fakeSubCACP" + # (optional) The name of the end entity, or configuration for how + # the EJBCA UpstreamAuthority should determine the end entity name. + end_entity_name = "" + # (optional) An account binding ID in EJBCA to associate with issued certificates. 
+ account_binding_id = "abc123" + } + } + + # BundlePublisher "aws_s3": A bundle publisher that puts the current trust # bundle of the server in a designated Amazon S3 bucket, keeping it updated. # BundlePublisher "aws_s3" { diff --git a/doc/plugin_agent_nodeattestor_http_challenge.md b/doc/plugin_agent_nodeattestor_http_challenge.md new file mode 100644 index 00000000000..5fd14c1e3d8 --- /dev/null +++ b/doc/plugin_agent_nodeattestor_http_challenge.md @@ -0,0 +1,49 @@ +# Agent plugin: NodeAttestor "http_challenge" + +*Must be used in conjunction with the server-side http_challenge plugin* + +The `http_challenge` plugin handshakes via http to ensure the agent is running on a valid +dns name. + +The SPIFFE ID produced by the server-side `http_challenge` plugin is based on the dns name of the agent. +The SPIFFE ID has the form: + +```xml +spiffe:///spire/agent/http_challenge/ +``` + +| Configuration | Description | Default | +|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------|-----------| +| `hostname` | Hostname to use for handshaking. If unset, it will be automatically detected. | | +| `agentname` | Name of this agent on the host. Useful if you have multiple agents bound to different spire servers on the same host and sharing the same port. | "default" | +| `port` | The port to listen on. If unspecified, a random value will be used. | random | +| `advertised_port` | The port to tell the server to call back on. | $port | + +If `advertised_port` != `port`, you will need to setup an http proxy between the two ports. This is useful if you already run a webserver on port 80. + +A sample configuration: + +```hcl + NodeAttestor "http_challenge" { + plugin_data { + port = 80 + } + } +``` + +## Proxies + +Say you want to validate using port 80 to be internet firewall friendly. 
If you already have a webserver on port 80 or want to use multiple agents with different SPIRE servers and use the same port, +you can have your webserver proxy over to the SPIRE agent(s) by setting up a proxy on `/.well-known/spiffe/nodeattestor/http_challenge/$agentname` to +`http://localhost:$port/.well-known/spiffe/nodeattestor/http_challenge/$agentname`. + +Example spire agent configuration: + +```hcl + NodeAttestor "http_challenge" { + plugin_data { + port = 8080 + advertised_port = 80 + } + } +``` diff --git a/doc/plugin_agent_workloadattestor_docker.md b/doc/plugin_agent_workloadattestor_docker.md index 41462d9cbf4..2123ee1a13f 100644 --- a/doc/plugin_agent_workloadattestor_docker.md +++ b/doc/plugin_agent_workloadattestor_docker.md @@ -10,7 +10,7 @@ then querying the docker daemon for the container's labels. | docker_version | The API version of the docker daemon. If not specified | | | container_id_cgroup_matchers | A list of patterns used to discover container IDs from cgroup entries (Unix) | | | docker_host | The location of the Docker Engine API endpoint (Windows only) | "npipe:////./pipe/docker_engine" | -| use_new_container_locator | If true, enables the new container locator algorithm that has support for cgroups v2 | false | +| use_new_container_locator | If true, enables the new container locator algorithm that has support for cgroups v2 | true | | verbose_container_locator_logs | If true, enables verbose logging of mountinfo and cgroup information used to locate containers | false | A sample configuration: @@ -22,6 +22,35 @@ A sample configuration: } ``` +## Sigstore experimental feature + +This feature extends the `docker` workload attestor with the ability to validate container image signatures and attestations using the [Sigstore](https://www.sigstore.dev/) ecosystem. 
+ +### Experimental options + +| Option | Description | +|------------|-----------------------------------------------------------------------------------------| +| `sigstore` | Sigstore options. Options described below. See [Sigstore options](#sigstore-options) | + +### Sigstore options + +| Option | Description | +|------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `allowed_identities` | Maps OIDC Provider URIs to lists of allowed subjects. Supports regular expressions patterns. Defaults to empty. If unspecified, signatures from any issuer are accepted. (eg. `"https://accounts.google.com" = ["subject1@example.com","subject2@example.com"]`). | +| `skipped_images` | Lists image IDs to exclude from Sigstore signature verification. For these images, no Sigstore selectors will be generated. Defaults to an empty list. | +| `rekor_url` | Specifies the Rekor URL for transparency log verification. Default is the public Rekor instance [https://rekor.sigstore.dev](https://rekor.sigstore.dev). | +| `ignore_tlog` | If set to true, bypasses the transparency log verification and the selectors based on the Rekor bundle are not generated. | +| `ignore_attestations` | If set to true, bypasses the image attestations verification and the selector `image-attestations:verified` is not generated. | +| `ignore_sct` | If set to true, bypasses the Signed Certificate Timestamp (SCT) verification. | +| `registry_credentials` | Maps each registry URL to its corresponding authentication credentials. Example: `{"docker.io": {"username": "user", "password": "pass"}}`. | + +#### Custom CA Roots + +Custom CA roots signed through TUF can be provided using the `cosign initialize` command. 
This method securely pins the +CA roots, ensuring that only trusted certificates are used during validation. Additionally, trusted roots for +certificate validation can be specified via the `SIGSTORE_ROOT_FILE` environment variable. For more details on Cosign +configurations, refer to the [documentation](https://github.com/sigstore/cosign/blob/main/README.md). + ## Workload Selectors Since selectors are created dynamically based on the container's docker labels, there isn't a list of known selectors. @@ -33,6 +62,22 @@ Instead, each of the container's labels are used in creating the list of selecto | `docker:env` | `docker:env:VAR=val` | The raw string value of each of the container's environment variables. | | `docker:image_id` | `docker:image_id:envoyproxy/envoy:contrib-v1.29.1` | The image name and version of the container. | +Sigstore enabled selectors (available when configured to use `sigstore`) + +| Selector | Value | +|-----------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| docker:image-signature:verified | When the image signature was verified and is valid. | +| docker:image-attestations:verified | When the image attestations were verified and are valid. | +| docker:image-signature-value | The base64 encoded value of the signature (eg. `k8s:image-signature-content:MEUCIQCyem8Gcr0sPFMP7fTXazCN57NcN5+MjxJw9Oo0x2eM+AIgdgBP96BO1Te/NdbjHbUeb0BUye6deRgVtQEv5No5smA=`) | +| docker:image-signature-subject | The OIDC principal that signed the image (e.g., `k8s:image-signature-subject:spirex@example.com`) | +| docker:image-signature-issuer | The OIDC issuer of the signature (e.g., `k8s:image-signature-issuer:https://accounts.google.com`) | +| docker:image-signature-log-id | A unique LogID for the Rekor transparency log entry (eg. 
`k8s:image-signature-log-id:c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b95918123`) | +| docker:image-signature-log-index | The log index for the Rekor transparency log entry (eg. `k8s:image-signature-log-index:105695637`) | +| docker:image-signature-integrated-time | The time (in Unix timestamp format) when the image signature was integrated into the signature transparency log (eg. `k8s:image-signature-integrated-time:1719237832`) | +| docker:image-signature-signed-entry-timestamp | The base64 encoded signed entry (signature over the logID, logIndex, body and integratedTime) (eg. `k8s:image-signature-integrated-time:MEQCIDP77vB0/MEbR1QKZ7Ol8PgFwGEEvnQJiv5cO7ATDYRwAiB9eBLYZjclxRNaaNJVBdQfP9Y8vGVJjwdbisme2cKabc`) | + +If `ignore_tlog` is set to `true`, the selectors based on the Rekor bundle (`-log-id`, `-log-index`, `-integrated-time`, and `-signed-entry-timestamp`) are not generated. + ## Container ID CGroup Matchers The patterns provided should use the wildcard `*` matching token and `` capture token diff --git a/doc/plugin_agent_workloadattestor_k8s.md b/doc/plugin_agent_workloadattestor_k8s.md index fc467f5b74f..8be4b725390 100644 --- a/doc/plugin_agent_workloadattestor_k8s.md +++ b/doc/plugin_agent_workloadattestor_k8s.md @@ -60,42 +60,37 @@ since [hostprocess](https://kubernetes.io/docs/tasks/configure-pod-container/cre | `node_name_env` | The environment variable used to obtain the node name. Defaults to `MY_NODE_NAME`. | | `node_name` | The name of the node. Overrides the value obtained by the environment variable specified by `node_name_env`. | | `experimental` | The experimental options that are subject to change or removal. | -| `use_new_container_locator` | If true, enables the new container locator algorithm that has support for cgroups v2. Defaults to false. | -| `verbose_container_locator_logs` | If true, enables verbose logging of mountinfo and cgroup information used to locate containers. Defaults to false. 
| +| `use_new_container_locator` | If true, enables the new container locator algorithm that has support for cgroups v2. Defaults to true. | +| `verbose_container_locator_logs` | If true, enables verbose logging of mountinfo and cgroup information used to locate containers. Defaults to false. | -| Experimental options | Description | -|----------------------|----------------------------------------------------------------------------------------------------------------------------- | -| `sigstore` | Sigstore options. Options described below. See [Sigstore workload attestor for SPIRE](#sigstore-workload-attestor-for-spire) | +## Sigstore experimental feature -| Sigstore options | Description | -|------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `skip_signature_verification_image_list` | The list of images, described as digest hashes, that should be skipped in signature verification. Defaults to empty list. | -| `allowed_subjects_list` | A map of allowed subject strings, keyed by the OIDC Provider URI, that are trusted and are allowed to sign container images artifacts. Defaults to empty. If empty, no workload will pass signature validation, unless listed on `skip_signature_verification_image_list`. (eg. `"https://accounts.google.com" = ["subject1@example.com","subject2@example.com"]`). | -| `rekor_url` | The rekor URL to use with cosign. Required. See notes below. 
| -| `enforce_sct` | A boolean to be set to false in case of a private deployment, not using public CT | +This feature extends the `k8s` workload attestor with the ability to validate container image signatures and attestations using the [Sigstore](https://www.sigstore.dev/) ecosystem. -> **Note** Cosign discourages the use of image tags for referencing docker images, and this plugin does not support attestation of sigstore selectors for workloads running on containers using tag-referenced images, which will then fail attestation for both sigstore and k8s selectors. In cases where this is necessary, add the digest string for the image in the `skip_signature_verification_image_list` setting (eg. `"sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"`). Note that sigstore signature attestation will still not be performed, but this will allow k8s selectors to be returned, along with the `"k8s:sigstore-validation:passed"` selector. +### Experimental options - - -> **Note** Since the SPIRE Agent can also go through workload attestation, it will also need to be included in the skip list if either its image is not signed or has a digest reference string. - - - -> **Note** The sigstore project contains a transparency log called Rekor that provides an immutable, tamper-resistant ledger to record signed metadata to an immutable record. While it is possible to run your own instance, a public instance of rekor is available at `https://rekor.sigstore.dev/`. +| Option | Description | +|------------|-------------------------------------------------------------------------------------------| +| `sigstore` | Sigstore options. Options described below. See [Sigstore options](#sigstore-options) | -## Sigstore workload attestor for SPIRE +### Sigstore options -### Platform support - -This capability is only supported on Unix systems. 
- -The k8s workload attestor plugin also has capabilities to validate container images signatures through [sigstore](https://www.sigstore.dev/) +| Option | Description | +|------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `allowed_identities` | Maps OIDC Provider URIs to lists of allowed subjects. Supports regular expressions patterns. Defaults to empty. If unspecified, signatures from any issuer are accepted. (eg. `"https://accounts.google.com" = ["subject1@example.com","subject2@example.com"]`). | +| `skipped_images` | Lists image IDs to exclude from Sigstore signature verification. For these images, no Sigstore selectors will be generated. Defaults to an empty list. | +| `rekor_url` | Specifies the Rekor URL for transparency log verification. Default is the public Rekor instance [https://rekor.sigstore.dev](https://rekor.sigstore.dev). | +| `ignore_tlog` | If set to true, bypasses the transparency log verification and the selectors based on the Rekor bundle are not generated. | +| `ignore_attestations` | If set to true, bypasses the image attestations verification and the selector `image-attestations:verified` is not generated. | +| `ignore_sct` | If set to true, bypasses the Signed Certificate Timestamp (SCT) verification. | +| `registry_credentials` | Maps each registry URL to its corresponding authentication credentials. Example: `{"docker.io": {"username": "user", "password": "pass"}}`. | -Cosign supports container signing, verification, and storage in an OCI registry. Cosign aims to make signatures invisible infrastructure. For this, we’ve chosen the Sigstore ecosystem and artifacts. 
Digging deeper, we are using: Rekor (signature transparency log), Fulcio (signing certificate issuer and certificate transparency log) and Cosign (container image signing tool) to guarantee the authenticity of the running workload. +#### Custom CA Roots -> **Note** you can provide your own CA roots signed through TUF via the cosign initialize command. -This effectively securely pins the CA roots. We allow you to also specify trusted roots via the `SIGSTORE_ROOT_FILE` flag +Custom CA roots signed through TUF can be provided using the `cosign initialize` command. This method securely pins the +CA roots, ensuring that only trusted certificates are used during validation. Additionally, trusted roots for +certificate validation can be specified via the `SIGSTORE_ROOT_FILE` environment variable. For more details on Cosign +configurations, refer to the [documentation](https://github.com/sigstore/cosign/blob/main/README.md). ### K8s selectors @@ -116,15 +111,22 @@ This effectively securely pins the CA roots. We allow you to also specify truste | k8s:pod-init-image | An Image OR ImageID of any init container in the workload's pod, [as reported by K8S](https://pkg.go.dev/k8s.io/api/core/v1#ContainerStatus). 
Selector value may be an image tag, such as: `docker.io/envoyproxy/envoy-alpine:v1.16.0`, or a resolved SHA256 image digest, such as `docker.io/envoyproxy/envoy-alpine@sha256:bf862e5f5eca0a73e7e538224578c5cf867ce2be91b5eaed22afc153c00363eb` | | k8s:pod-init-image-count | The number of init container images in workload's pod | -Sigstore enabled selectors (available when configured to use sigstore) +Sigstore enabled selectors (available when configured to use `sigstore`) + +| Selector | Value | +|--------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| k8s:image-signature:verified | When the image signature was verified and is valid. | +| k8s:image-attestations:verified | When the image attestations were verified and are valid. | +| k8s:image-signature-value | The base64 encoded value of the signature (eg. `k8s:image-signature-content:MEUCIQCyem8Gcr0sPFMP7fTXazCN57NcN5+MjxJw9Oo0x2eM+AIgdgBP96BO1Te/NdbjHbUeb0BUye6deRgVtQEv5No5smA=`) | +| k8s:image-signature-subject | The OIDC principal that signed the image (e.g., `k8s:image-signature-subject:spirex@example.com`) | +| k8s:image-signature-issuer | The OIDC issuer of the signature (e.g., `k8s:image-signature-issuer:https://accounts.google.com`) | +| k8s:image-signature-log-id | A unique LogID for the Rekor transparency log entry (eg. `k8s:image-signature-log-id:c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b95918123`) | +| k8s:image-signature-log-index | The log index for the Rekor transparency log entry (eg. `k8s:image-signature-log-index:105695637`) | +| k8s:image-signature-integrated-time | The time (in Unix timestamp format) when the image signature was integrated into the signature transparency log (eg. 
`k8s:image-signature-integrated-time:1719237832`) | +| k8s:image-signature-signed-entry-timestamp | The base64 encoded signed entry (signature over the logID, logIndex, body and integratedTime) (eg. `k8s:image-signature-integrated-time:MEQCIDP77vB0/MEbR1QKZ7Ol8PgFwGEEvnQJiv5cO7ATDYRwAiB9eBLYZjclxRNaaNJVBdQfP9Y8vGVJjwdbisme2cKabc`) | + +If `ignore_tlog` is set to `true`, the selectors based on the Rekor bundle (`-log-id`, `-log-index`, `-integrated-time`, and `-signed-entry-timestamp`) are not generated. -| Selector | Value | -|----------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| k8s:${containerID}:image-signature-content | A containerID is an unique alphanumeric number for each container. The value of the signature itself in a hash (eg. `k8s:000000:image-signature-content:MEUCIQCyem8Gcr0sPFMP7fTXazCN57NcN5+MjxJw9Oo0x2eM+AIgdgBP96BO1Te/NdbjHbUeb0BUye6deRgVtQEv5No5smA=`) | -| k8s:${containerID}:image-signature-subject | OIDC principal that signed it​ (eg. `k8s:000000:image-signature-subject:spirex@example.com`) | -| k8s:${containerID}:image-signature-logid | A unique LogID for the Rekor transparency log​ (eg. `k8s:000000:image-signature-logid:samplelogID`) | -| k8s:${containerID}:image-signature-integrated-time | The time (in Unix timestamp format) when the image signature was integrated into the signature transparency log​ (eg. `k8s:000000:image-signature-integrated-time:12345`) | -| k8s:sigstore-validation | The confirmation if the signature is valid, has value of "passed" (eg. 
`k8s:sigstore-validation:passed`) | > **Note** `container-image` will ONLY match against the specific container in the pod that is contacting SPIRE on behalf of > the pod, whereas `pod-image` and `pod-init-image` will match against ANY container or init container in the Pod, > respectively. diff --git a/doc/plugin_server_nodeattestor_http_challenge.md b/doc/plugin_server_nodeattestor_http_challenge.md new file mode 100644 index 00000000000..515adf500ef --- /dev/null +++ b/doc/plugin_server_nodeattestor_http_challenge.md @@ -0,0 +1,55 @@ +# Server plugin: NodeAttestor "http_challenge" + +*Must be used in conjunction with the agent-side http_challenge plugin* + +The `http_challenge` plugin handshakes via http to ensure the agent is running on a valid +dns name. + +The SPIFFE ID produced by the plugin is based on the dns name attested. +The SPIFFE ID has the form: + +```xml +spiffe:///spire/agent/http_challenge/ +``` + +| Configuration | Description | Default | +|-------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------| +| `allowed_dns_patterns` | A list of regular expressions to match to the hostname being attested. If none match, attestation will fail. If unset, all hostnames are allowed. | | +| `required_port` | Set to a port number to require clients to listen only on that port. If unset, all port numbers are allowed | | +| `allow_non_root_ports` | Set to true to allow ports >= 1024 to be used by the agents with the advertised_port | true | +| `tofu` | Trust on first use of the successful challenge. Can only be disabled if allow_non_root_ports=false or required_port < 1024 | true | + +A sample configuration: + +```hcl + NodeAttestor "http_challenge" { + plugin_data { + # Only match hosts that start with p, have a number, then end in example.com. 
Ex: 'p1.example.com' + allowed_dns_patterns = ["p[0-9]\.example\.com"] + + # Only allow clients to use port 80 + required_port = 80 + + # Change the agent's SPIFFE ID format + # agent_path_template = "/spire/agent/http_challenge/{{ .Hostname }}" + } + } +``` + +## Selectors + +| Selector | Example | Description | +|----------|------------------------------------------|------------------------| +| Hostname | `http_challenge:hostname:p1.example.com` | The Subject's Hostname | + +## Security Considerations + +Generally, TCP ports are accessible to any user of the node. As a result, it is possible for non-agent code running on a node to attest to the SPIRE Server, allowing it to obtain any workload identity that the node is authorized to run. + +The `http_challenge` node attestor implements multiple features to mitigate the risk. + +Trust On First Use (or TOFU) is one such option. For any given node, attestation may occur only once when enabled. Subsequent attestation attempts will be rejected. + +With TOFU, it is still possible for non-agent code to complete node attestation before SPIRE Agent can, however this condition is easily and quickly detectable as SPIRE Agent will fail to start, and both SPIRE Agent and SPIRE Server will log the occurrence. Such cases should be investigated as possible security incidents. + +You also can require the port to be a trusted port that only trusted user such as root can open (port number < 1024). diff --git a/doc/plugin_server_upstreamauthority_ejbca.md b/doc/plugin_server_upstreamauthority_ejbca.md new file mode 100644 index 00000000000..d7203dcdcfe --- /dev/null +++ b/doc/plugin_server_upstreamauthority_ejbca.md @@ -0,0 +1,82 @@ +# Server plugin: UpstreamAuthority "ejbca" + +The `ejbca` UpstreamAuthority plugin uses a connected [EJBCA](https://www.ejbca.org/) to issue intermediate signing certificates for the SPIRE server. The plugin authenticates to EJBCA using mTLS (client certificate). 
+ +> The EJBCA UpstreamAuthority plugin uses only the `/ejbca-rest-api/v1/certificate/pkcs10enroll` REST API endpoint, and is compatible with both [EJBCA Community](https://www.ejbca.org/) and [EJBCA Enterprise](https://www.keyfactor.com/products/ejbca-enterprise/). + +## Requirements + +* EJBCA [Community](https://www.ejbca.org/) or EJBCA [Enterprise](https://www.keyfactor.com/products/ejbca-enterprise/) + * The "REST Certificate Management" protocol must be enabled under System Configuration > Protocol Configuration. + +> It's important that the EJBCA Certificate Profile and End Entity Profile are properly configured before using this plugin. The plugin does not attempt to configure these profiles. Please refer to the [EJBCA Sub CA End Entity Profile & Certificate Profile Configuration](#ejbca-sub-ca-end-entity-profile--certificate-profile-configuration) section for more information. + +## Configuration + +The EJBCA UpstreamAuthority Plugin accepts the following configuration options. + +| Configuration | Description | Default from Environment Variables | +|----------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------| +| `hostname` | The hostname of the connected EJBCA server. | | +| `ca_cert_path` | (optional) The path to the CA certificate file used to validate the EJBCA server's certificate. Certificates must be in PEM format. | `EJBCA_CA_CERT_PATH` | +| `client_cert_path` | The path to the client certificate (public key only) used to authenticate to EJBCA. Must be in PEM format. | `EJBCA_CLIENT_CERT_PATH` | +| `client_cert_key_path` | The path to the client key matching `client_cert` used to authenticate to EJBCA. Must be in PEM format. 
| `EJBCA_CLIENT_CERT_KEY_PATH` | +| `ca_name` | The name of a CA in the connected EJBCA instance that will issue the intermediate signing certificates. | | +| `end_entity_profile_name` | The name of an end entity profile in the connected EJBCA instance that is configured to issue SPIFFE certificates. | | +| `certificate_profile_name` | The name of a certificate profile in the connected EJBCA instance that is configured to issue intermediate CA certificates. | | +| `end_entity_name` | (optional) The name of the end entity, or configuration for how the EJBCA UpstreamAuthority should determine the end entity name. See [End Entity Name Customization](#ejbca-end-entity-name-customization-leaf-certificates) for more info. | | +| `account_binding_id` | (optional) An account binding ID in EJBCA to associate with issued certificates. | | + +> Configuration parameters that have an override from Environment Variables will always override the provided value from the SPIRE configuration with the values in the environment. +> +> If all configuration parameters for the selected auth method are specified by environment variables, an empty block still must exist to select the auth method. + +```hcl +UpstreamAuthority "ejbca" { + plugin_data { + hostname = "ejbca.example.com" + ca_cert_path = "/path/to/ca_cert.pem" + client_cert_path = "/path/to/client_cert.pem" + client_cert_key_path = "/path/to/client_key.pem" + ca_name = "Fake-Sub-CA" + end_entity_profile_name = "fakeSpireIntermediateCAEEP" + certificate_profile_name = "fakeSubCACP" + end_entity_name = "cn" + account_binding_id = "foo123" + } +} +``` + +## EJBCA Sub CA End Entity Profile & Certificate Profile Configuration + +The connected EJBCA instance must have at least one Certificate Profile and at least one End Entity Profile capable of issuing SPIFFE certificates. The Certificate Profile must be of type `Sub CA`, and must be able to issue certificates with the ECDSA prime256v1 algorithm, at a minimum. 
The SPIRE Server configuration may require additional fields. + +The End Entity Profile must have the following Subject DN Attributes: + +* `serialNumber, Serial number (in DN)` [modifiable] +* `O, Organization` [modifiable] +* `C, Country (ISO 3166)` [modifiable] + +And the following Other Subject Attributes: + +* `Uniform Resource Identifier (URI)` [modifiable] + +## EJBCA End Entity Name Customization (leaf certificates) + +The EJBCA UpstreamAuthority plugin allows users to determine how the End Entity Name is selected at runtime. Here are the options you can use for `end_entity_name`: + +* **`cn`:** Uses the Common Name from the CSR's Distinguished Name. +* **`dns`:** Uses the first DNS Name from the CSR's Subject Alternative Names (SANs). +* **`uri`:** Uses the first URI from the CSR's Subject Alternative Names (SANs). +* **`ip`:** Uses the first IP Address from the CSR's Subject Alternative Names (SANs). +* **Custom Value:** Any other string will be directly used as the End Entity Name. + +By default, SPIRE issues certificates with no DN and only the SPIFFE ID in the SANs. If you want to use the SPIFFE ID as the End Entity Name, you can usually leave this field blank or set it to `uri`. + +If the endEntityName field is not explicitly set, the EJBCA UpstreamAuthority plugin will attempt to determine the End Entity Name using the following default behavior: + +* **First, it will try to use the Common Name:** It looks at the Common Name from the CSR's Distinguished Name. +* **If the Common Name is not available, it will use the first DNS Name:** It looks at the first DNS Name from the CSR's Subject Alternative Names (SANs). +* **If the DNS Name is not available, it will use the first URI:** It looks at the first URI from the CSR's Subject Alternative Names (SANs). +* **If the URI is not available, it will use the first IP Address:** It looks at the first IP Address from the CSR's Subject Alternative Names (SANs). 
+* **If none of the above are available, it will return an error.** diff --git a/doc/spire_agent.md b/doc/spire_agent.md index de5054d607d..4954d6b176f 100644 --- a/doc/spire_agent.md +++ b/doc/spire_agent.md @@ -71,12 +71,13 @@ This may be useful for templating configuration files, for example across differ | `workload_x509_svid_key_type` | The workload X509 SVID key type <rsa-2048|ec-p256> | ec-p256 | | `availability_target` | The minimum amount of time desired to gracefully handle SPIRE Server or Agent downtime. This configurable influences how aggressively X509 SVIDs should be rotated. If set, must be at least 24h. See [Availability Target](#availability-target) | | -| experimental | Description | Default | -|:---------------------------|------------------------------------------------------------------------------------|-------------------------| -| `named_pipe_name` | Pipe name to bind the SPIRE Agent API named pipe (Windows only) | \spire-agent\public\api | -| `sync_interval` | Sync interval with SPIRE server with exponential backoff | 5 sec | -| `x509_svid_cache_max_size` | Soft limit of max number of SVIDs that would be stored in LRU cache (deprecated) | 1000 | -| `disable_lru_cache` | Reverts back to use the SPIRE Agent non-LRU cache for storing SVIDs (deprecated) | false | +| experimental | Description | Default | +|:------------------------------|--------------------------------------------------------------------------------------|-------------------------| +| `named_pipe_name` | Pipe name to bind the SPIRE Agent API named pipe (Windows only) | \spire-agent\public\api | +| `sync_interval` | Sync interval with SPIRE server with exponential backoff | 5 sec | +| `x509_svid_cache_max_size` | Soft limit of max number of SVIDs that would be stored in LRU cache (deprecated) | 1000 | +| `disable_lru_cache` | Reverts back to use the SPIRE Agent non-LRU cache for storing SVIDs (deprecated) | false | +| `use_sync_authorized_entries` | Use SyncAuthorizedEntries 
API for periodically synchronization of authorized entries | false | ### Initial trust bundle configuration @@ -369,7 +370,31 @@ plugins { ## Delegated Identity API -The Delegated Identity API allows an authorized (i.e. delegated) workload to obtain SVIDs and bundles on behalf of workloads that cannot be attested by SPIRE Agent directly. The authorized workload does so by providing SPIRE Agent the selectors that would normally be obtained during workload attestation. The Delegated Identity API is served over the admin API endpoint. +The Delegated Identity API allows an authorized (i.e. delegated) workload to obtain SVIDs and bundles on behalf of workloads that cannot be attested by SPIRE Agent directly. + +The Delegated Identity API is served over the SPIRE Agent's admin API endpoint. + +Note that this explicitly and by-design grants the authorized delegate workload the ability to impersonate any of the other workloads it can obtain SVIDs for. Any workload authorized to use the +Delegated Identity API becomes a "trusted delegate" of the SPIRE Agent, and may impersonate and act on behalf of all workload SVIDs it obtains from the SPIRE Agent. + +The trusted delegate workload itself is attested by the SPIRE Agent first, and the delegate's SPIFFE ID is checked against an allowlist of authorized delegates. + +Once these requirements are met, the trusted delegate workload can obtain SVIDS for any workloads in the scope of the SPIRE Agent instance it is interacting with. + +There are two ways the trusted delegate workload can request SVIDs for other workloads from the SPIRE Agent: + +1. By attesting the other workload itself, building a set of selectors, and then providing SPIRE Agent those selectors over the Delegated Identity API. + In this approach, the trusted delegate workload is entirely responsible for attesting the other workload and building the attested selectors. 
+ When those selectors are presented to the SPIRE Agent, the SPIRE Agent will simply return SVIDs for any workload registration entries that match the provided selectors. + No other checks or attestations will be performed by the SPIRE Agent. + +1. By obtaining a PID for the other workload, and providing that PID to the SPIRE Agent over the Delegated Identity API. + In this approach, the SPIRE Agent will do attestation for the provided PID, build the attested selectors, and return SVIDs for any workload registration entries that match the selectors the SPIRE Agent attested from that PID. + This differs from the previous approach in that the SPIRE Agent itself (not the trusted delegate) handles the attestation of the other workload. + On most platforms PIDs are not stable identifiers, so the trusted delegate workload **must** ensure that the PID it provides to the SPIRE Agent + via the Delegated Identity API for attestation is not recycled between the time a trusted delegate makes an Delegate Identity API request, and obtains a Delegate Identity API response. + How this is accomplished is platform-dependent and the responsibility of the trusted delegate (e.g. by using pidfds on Linux). + Attestation results obtained via the Delegated Identity API for a PID are valid until the process referred to by the PID terminates, or is re-attested - whichever comes first. To enable the Delegated Identity API, configure the admin API endpoint address and the list of SPIFFE IDs for authorized delegates. For example: diff --git a/doc/spire_server.md b/doc/spire_server.md index dbc9ce773e2..fdff08ed0f1 100644 --- a/doc/spire_server.md +++ b/doc/spire_server.md @@ -16,6 +16,7 @@ This document is a configuration reference for SPIRE Server. 
It includes informa ## Built-in plugins + | Type | Name | Description | |--------------------|--------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------| | DataStore | [sql](/doc/plugin_server_datastore_sql.md) | An SQL database storage for SQLite, PostgreSQL and MySQL databases for the SPIRE datastore | @@ -83,6 +84,7 @@ This may be useful for templating configuration files, for example across differ | `ratelimit` | Rate limiting configurations, usually used when the server is behind a load balancer (see below) | | | `socket_path` | Path to bind the SPIRE Server API socket to (Unix only) | /tmp/spire-server/private/api.sock | | `trust_domain` | The trust domain that this server belongs to (should be no more than 255 characters) | | +| `use_legacy_downstream_x509_ca_ttl` | Use the downstream spire-server registration entry TTL as the downstream CA TTL. This is deprecated and will be removed in a future version. | true | | ca_subject | Description | Default | |:----------------------------|--------------------------------|----------------| @@ -95,6 +97,7 @@ This may be useful for templating configuration files, for example across differ | `cache_reload_interval` | The amount of time between two reloads of the in-memory entry cache. Increasing this will mitigate high database load for extra large deployments, but will also slow propagation of new or updated entries to agents. | 5s | | `events_based_cache` | Use events to update the cache with what's changed since the last update. Enabling this will reduce overhead on the database. | false | | `prune_events_older_than`| How old an event can be before being deleted. Used with events based cache. Decreasing this will keep the events table smaller, but will increase risk of missing an event if connection to the database is down. 
| 12h | +| `sql_transaction_timeout`| Maximum time an SQL transaction could take, used by the events based cache to determine when an event id is unlikely to be used anymore. | 24h | | `auth_opa_policy_engine` | The [auth opa_policy engine](/doc/authorization_policy_engine.md) used for authorization decisions | default SPIRE authorization policy | | `named_pipe_name` | Pipe name of the SPIRE Server API named pipe (Windows only) | \spire-server\private\api | @@ -386,8 +389,8 @@ Creates registration entries. | `-selector` | A colon-delimited type:value selector used for attestation. This parameter can be used more than once, to specify multiple selectors that must be satisfied. | | | `-socketPath` | Path to the SPIRE Server API socket | /tmp/spire-server/private/api.sock | | `-spiffeID` | The SPIFFE ID that this record represents and will be set to the SVID issued. | | -| `-x509SVIDTTL` | A TTL, in seconds, for any X509-SVID issued as a result of this record. Overrides `-ttl` value. | The TTL configured with `default_x509_svid_ttl` | -| `-jwtSVIDTTL` | A TTL, in seconds, for any JWT-SVID issued as a result of this record. Overrides `-ttl` value. | The TTL configured with `default_jwt_svid_ttl` | +| `-x509SVIDTTL` | A TTL, in seconds, for any X509-SVID issued as a result of this record. | The TTL configured with `default_x509_svid_ttl` | +| `-jwtSVIDTTL` | A TTL, in seconds, for any JWT-SVID issued as a result of this record. | The TTL configured with `default_jwt_svid_ttl` | | `-storeSVID` | A boolean value that, when set, indicates that the resulting issued SVID from this entry must be stored through an SVIDStore plugin | ### `spire-server entry update` @@ -407,8 +410,8 @@ Updates registration entries. | `-selector` | A colon-delimited type:value selector used for attestation. This parameter can be used more than once, to specify multiple selectors that must be satisfied. 
| | | `-socketPath` | Path to the SPIRE Server API socket | /tmp/spire-server/private/api.sock | | `-spiffeID` | The SPIFFE ID that this record represents and will be set to the SVID issued. | | -| `-x509SVIDTTL` | A TTL, in seconds, for any X509-SVID issued as a result of this record. Overrides `-ttl` value. | The TTL configured with `default_x509_svid_ttl` | -| `-jwtSVIDTTL` | A TTL, in seconds, for any JWT-SVID issued as a result of this record. Overrides `-ttl` value. | The TTL configured with `default_jwt_svid_ttl` | +| `-x509SVIDTTL` | A TTL, in seconds, for any X509-SVID issued as a result of this record. | The TTL configured with `default_x509_svid_ttl` | +| `-jwtSVIDTTL` | A TTL, in seconds, for any JWT-SVID issued as a result of this record. | The TTL configured with `default_jwt_svid_ttl` | | `storeSVID` | A boolean value that, when set, indicates that the resulting issued SVID from this entry must be stored through an SVIDStore plugin | ### `spire-server entry count` diff --git a/doc/telemetry/telemetry.md b/doc/telemetry/telemetry.md index 3c385d34260..a5bb1702a6a 100644 --- a/doc/telemetry/telemetry.md +++ b/doc/telemetry/telemetry.md @@ -51,7 +51,15 @@ The following metrics are emitted: | Call Counter | `datastore`, `registration_entry_event`, `list` | | The Datastore is listing a registration entry events. | | Call Counter | `datastore`, `registration_entry_event`, `prune` | | The Datastore is pruning expired registration entry events. | | Call Counter | `datastore`, `registration_entry_event`, `fetch` | | The Datastore is fetching a specific registration entry event. | -| Call Counter | `entry`, `cache`, `reload` | | The Server is reloading its in-memory entry cache from the datastore. 
| +| Call Counter | `entry`, `cache`, `reload` | | The Server is reloading its in-memory entry cache from the datastore | +| Gauge | `node`, `agents_by_id_cache`, `count` | | The Server is re-hydrating the agents-by-id event-based cache | +| Gauge | `node`, `agents_by_expiresat_cache`, `count` | | The Server is re-hydrating the agents-by-expiresat event-based cache | +| Gauge | `node`, `skipped_node_event_ids`, `count` | | The count of skipped ids detected in the last `sql_transaction_timeout` period. For databases that autoincrement ids by more than one, this number will overreport the skipped ids. [Issue](https://github.com/spiffe/spire/issues/5341) | +| Gauge | `entry`, `nodealiases_by_entryid_cache`, `count` | | The Server is re-hydrating the nodealiases-by-entryid event-based cache | +| Gauge | `entry`, `nodealiases_by_selector_cache`, `count` | | The Server is re-hydrating the nodealiases-by-selector event-based cache | +| Gauge | `entry`, `entries_by_entryid_cache`, `count` | | The Server is re-hydrating the entries-by-entryid event-based cache | +| Gauge | `entry`, `entries_by_parentid_cache`, `count` | | The Server is re-hydrating the entries-by-parentid event-based cache | +| Gauge | `entry`, `skipped_entry_event_ids`, `count` | | The count of skipped ids detected in the last sql_transaction_timeout period. For databases that autoincrement ids by more than one, this number will overreport the skipped ids. [Issue](https://github.com/spiffe/spire/issues/5341) | | Counter | `manager`, `jwt_key`, `activate` | | The CA manager has successfully activated a JWT Key. | | Gauge | `manager`, `x509_ca`, `rotate`, `ttl` | `trust_domain_id` | The CA manager is rotating the X.509 CA with a given TTL for a specific Trust Domain. | | Call Counter | `registration_entry`, `manager`, `prune` | | The Registration manager is pruning entries.
| diff --git a/go.mod b/go.mod index ba1f6b8d45b..8c888468e25 100644 --- a/go.mod +++ b/go.mod @@ -1,50 +1,51 @@ module github.com/spiffe/spire -go 1.22.3 +go 1.23.0 require ( - cloud.google.com/go/iam v1.1.12 - cloud.google.com/go/kms v1.18.4 - cloud.google.com/go/secretmanager v1.13.5 - cloud.google.com/go/security v1.17.4 + cloud.google.com/go/iam v1.2.0 + cloud.google.com/go/kms v1.19.0 + cloud.google.com/go/secretmanager v1.14.0 + cloud.google.com/go/security v1.18.0 cloud.google.com/go/storage v1.43.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute v1.0.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.1.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 - github.com/GoogleCloudPlatform/cloudsql-proxy v1.36.0 + github.com/GoogleCloudPlatform/cloudsql-proxy v1.37.0 + github.com/Keyfactor/ejbca-go-client-sdk v1.0.2 github.com/Microsoft/go-winio v0.6.2 github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 - github.com/aws/aws-sdk-go-v2 v1.30.3 - github.com/aws/aws-sdk-go-v2/config v1.27.18 - github.com/aws/aws-sdk-go-v2/credentials v1.17.18 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5 + github.com/aws/aws-sdk-go-v2 v1.30.4 + github.com/aws/aws-sdk-go-v2/config v1.27.27 + github.com/aws/aws-sdk-go-v2/credentials v1.17.27 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 github.com/aws/aws-sdk-go-v2/feature/rds/auth v1.4.2 github.com/aws/aws-sdk-go-v2/service/acmpca v1.35.0 - github.com/aws/aws-sdk-go-v2/service/ec2 v1.173.0 - github.com/aws/aws-sdk-go-v2/service/iam v1.34.1 - github.com/aws/aws-sdk-go-v2/service/kms v1.35.1 - github.com/aws/aws-sdk-go-v2/service/organizations v1.30.2 + 
github.com/aws/aws-sdk-go-v2/service/ec2 v1.177.0 + github.com/aws/aws-sdk-go-v2/service/iam v1.35.0 + github.com/aws/aws-sdk-go-v2/service/kms v1.35.3 + github.com/aws/aws-sdk-go-v2/service/organizations v1.31.0 github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.14.0 - github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 + github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0 github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.1 - github.com/aws/aws-sdk-go-v2/service/sts v1.30.1 - github.com/aws/smithy-go v1.20.3 + github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 + github.com/aws/smithy-go v1.20.4 github.com/blang/semver/v4 v4.0.0 github.com/cenkalti/backoff/v4 v4.3.0 - github.com/docker/docker v27.1.1+incompatible - github.com/envoyproxy/go-control-plane v0.12.0 + github.com/docker/docker v27.2.1+incompatible + github.com/envoyproxy/go-control-plane v0.13.0 github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa github.com/go-jose/go-jose/v4 v4.0.4 github.com/go-sql-driver/mysql v1.8.1 github.com/godbus/dbus/v5 v5.1.0 - github.com/gofrs/uuid/v5 v5.2.0 + github.com/gofrs/uuid/v5 v5.3.0 github.com/gogo/status v1.1.1 - github.com/google/btree v1.1.2 + github.com/google/btree v1.1.3 github.com/google/go-cmp v0.6.0 - github.com/google/go-containerregistry v0.20.1 + github.com/google/go-containerregistry v0.20.2 github.com/google/go-tpm v0.9.1 github.com/google/go-tpm-tools v0.4.4 github.com/googleapis/gax-go/v2 v2.13.0 @@ -53,123 +54,93 @@ require ( github.com/hashicorp/go-metrics v0.5.3 github.com/hashicorp/go-plugin v1.6.1 github.com/hashicorp/hcl v1.0.1-vault-5 - github.com/hashicorp/vault/api v1.14.0 - github.com/hashicorp/vault/sdk v0.13.0 + github.com/hashicorp/vault/api v1.15.0 + github.com/hashicorp/vault/sdk v0.14.0 github.com/imdario/mergo v0.3.16 github.com/imkira/go-observer v1.0.3 - github.com/jackc/pgx/v5 v5.6.0 + github.com/jackc/pgx/v5 v5.7.1 github.com/jinzhu/gorm v1.9.16 github.com/lestrrat-go/jwx/v2 v2.1.1 github.com/lib/pq v1.10.9 - 
github.com/mattn/go-sqlite3 v1.14.22 + github.com/mattn/go-sqlite3 v1.14.23 github.com/mitchellh/cli v1.1.5 - github.com/open-policy-agent/opa v0.67.0 - github.com/prometheus/client_golang v1.19.1 + github.com/open-policy-agent/opa v0.68.0 + github.com/prometheus/client_golang v1.20.3 github.com/shirou/gopsutil/v3 v3.24.5 - github.com/sigstore/cosign/v2 v2.2.4 + github.com/sigstore/cosign/v2 v2.4.0 github.com/sigstore/rekor v1.3.6 - github.com/sigstore/sigstore v1.8.7 + github.com/sigstore/sigstore v1.8.9 github.com/sirupsen/logrus v1.9.3 github.com/spiffe/go-spiffe/v2 v2.3.0 - github.com/spiffe/spire-api-sdk v1.2.5-0.20240627195926-b5ac064f580b + github.com/spiffe/spire-api-sdk v1.2.5-0.20240807182354-18e423ce2c1c github.com/spiffe/spire-plugin-sdk v1.4.4-0.20230721151831-bf67dde4721d github.com/stretchr/testify v1.9.0 github.com/uber-go/tally/v4 v4.1.16 github.com/valyala/fastjson v1.6.4 github.com/zeebo/errs v1.3.0 - golang.org/x/crypto v0.25.0 + golang.org/x/crypto v0.27.0 golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 - golang.org/x/net v0.27.0 - golang.org/x/sync v0.7.0 - golang.org/x/sys v0.22.0 - golang.org/x/time v0.5.0 - google.golang.org/api v0.190.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf - google.golang.org/grpc v1.65.0 + golang.org/x/net v0.29.0 + golang.org/x/sync v0.8.0 + golang.org/x/sys v0.25.0 + golang.org/x/time v0.6.0 + google.golang.org/api v0.197.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 + google.golang.org/grpc v1.66.2 google.golang.org/protobuf v1.34.2 - k8s.io/api v0.30.3 - k8s.io/apimachinery v0.30.3 - k8s.io/client-go v0.30.3 - k8s.io/kube-aggregator v0.30.3 - k8s.io/mount-utils v0.30.3 - sigs.k8s.io/controller-runtime v0.18.4 + k8s.io/api v0.31.1 + k8s.io/apimachinery v0.31.1 + k8s.io/client-go v0.31.1 + k8s.io/kube-aggregator v0.31.1 + k8s.io/mount-utils v0.31.1 + sigs.k8s.io/controller-runtime v0.19.0 ) require ( - cloud.google.com/go v0.115.0 // 
indirect - cloud.google.com/go/auth v0.7.3 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect + cloud.google.com/go v0.115.1 // indirect + cloud.google.com/go/auth v0.9.3 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/longrunning v0.5.11 // indirect + cloud.google.com/go/longrunning v0.6.0 // indirect filippo.io/edwards25519 v1.1.0 // indirect - github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 // indirect - github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect - github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.29 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect - github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect - github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect - github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/logger v0.2.1 // indirect - github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/DataDog/datadog-go v3.2.0+incompatible // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect github.com/OneOfOne/xxhash v1.2.8 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c // indirect - github.com/ThalesIgnite/crypto11 v1.2.5 // indirect github.com/agnivade/levenshtein v1.1.1 // indirect - github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 // indirect - github.com/alibabacloud-go/cr-20160607 v1.0.1 // indirect - github.com/alibabacloud-go/cr-20181201 
v1.0.10 // indirect - github.com/alibabacloud-go/darabonba-openapi v0.2.1 // indirect - github.com/alibabacloud-go/debug v1.0.0 // indirect - github.com/alibabacloud-go/endpoint-util v1.1.1 // indirect - github.com/alibabacloud-go/openapi-util v0.1.0 // indirect - github.com/alibabacloud-go/tea v1.2.1 // indirect - github.com/alibabacloud-go/tea-utils v1.4.5 // indirect - github.com/alibabacloud-go/tea-xml v1.1.3 // indirect - github.com/aliyun/credentials-go v1.3.1 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16 // indirect github.com/aws/aws-sdk-go-v2/service/ecr v1.24.7 // indirect github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.21.6 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.20.11 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5 // indirect - github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding 
v1.11.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/speakeasy v0.1.0 // indirect github.com/blang/semver v3.5.1+incompatible // indirect - github.com/cenkalti/backoff/v3 v3.2.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect - github.com/clbanning/mxj/v2 v2.7.0 // indirect - github.com/cloudflare/circl v1.3.7 // indirect github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b // indirect - github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect - github.com/coreos/go-oidc/v3 v3.11.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect - github.com/dimchansky/utfbom v1.1.1 // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/cli v24.0.7+incompatible // indirect + github.com/docker/cli v27.1.1+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.0 // indirect github.com/docker/go-connections v0.4.0 // indirect @@ -177,11 +148,11 @@ require ( 
github.com/dustin/go-humanize v1.0.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fatih/color v1.16.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-chi/chi v4.1.2+incompatible // indirect github.com/go-ini/ini v1.67.0 // indirect github.com/go-jose/go-jose/v3 v3.0.3 // indirect @@ -202,25 +173,22 @@ require ( github.com/goccy/go-json v0.10.3 // indirect github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/certificate-transparency-go v1.1.8 // indirect + github.com/google/certificate-transparency-go v1.2.1 // indirect github.com/google/flatbuffers v23.5.26+incompatible // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/go-configfs-tsm v0.2.2 // indirect - github.com/google/go-github/v55 v55.0.0 // indirect - github.com/google/go-querystring v1.1.0 // indirect github.com/google/go-sev-guest v0.9.3 // indirect github.com/google/go-tdx-guest v0.3.1 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/logger v1.1.1 // indirect github.com/google/s2a-go v0.1.8 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/gorilla/mux 
v1.8.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -237,13 +205,13 @@ require ( github.com/in-toto/in-toto-golang v0.9.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.4 // indirect + github.com/klauspost/compress v1.17.9 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/lestrrat-go/blackmagic v1.0.2 // indirect github.com/lestrrat-go/httpcc v1.0.1 // indirect @@ -256,34 +224,35 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect - github.com/moby/sys/mountinfo v0.6.2 // indirect + github.com/moby/sys/mountinfo v0.7.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/mozillazg/docker-credential-acr-helper v0.3.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect 
github.com/oklog/run v1.1.0 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/opencontainers/runc v1.1.14 // indirect + github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pborman/uuid v1.2.1 // indirect - github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/posener/complete v1.2.3 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.51.1 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect @@ -291,64 +260,59 @@ require ( github.com/sassoftware/relic v7.2.1+incompatible // indirect github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect github.com/segmentio/asm v1.2.0 // indirect - github.com/segmentio/ksuid v1.0.4 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/shopspring/decimal v1.2.0 // indirect - github.com/sigstore/fulcio v1.4.5 // indirect + github.com/sigstore/protobuf-specs v0.3.2 // indirect github.com/sigstore/timestamp-authority v1.2.2 // indirect - github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect 
github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/spf13/viper v1.18.2 // indirect + github.com/spf13/viper v1.19.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/tchap/go-patricia/v2 v2.3.1 // indirect - github.com/thales-e-security/pool v0.0.2 // indirect github.com/theupdateframework/go-tuf v0.7.0 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect - github.com/tjfoc/gmsm v1.4.1 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/transparency-dev/merkle v0.0.2 // indirect github.com/twmb/murmur3 v1.1.8 // indirect github.com/vbatts/tar-split v0.11.5 // indirect - github.com/xanzy/go-gitlab v0.102.0 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/yashtewari/glob-intersection v0.2.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/otel v1.29.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect + 
go.opentelemetry.io/otel/metric v1.29.0 // indirect go.opentelemetry.io/otel/sdk v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect - go.step.sm/crypto v0.44.2 // indirect + go.opentelemetry.io/otel/trace v1.29.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/term v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect - google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f // indirect + golang.org/x/mod v0.19.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/term v0.24.0 // indirect + golang.org/x/text v0.18.0 // indirect + google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/release-utils v0.7.7 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 994df1b1a26..f371d807997 100644 --- a/go.sum +++ b/go.sum @@ -35,8 +35,8 @@ cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34h cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= cloud.google.com/go v0.105.0/go.mod 
h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= -cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= -cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= +cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= +cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= @@ -70,10 +70,10 @@ cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMK cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= -cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY= -cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA= -cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= -cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= +cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U= +cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk= +cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= +cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= cloud.google.com/go/automl 
v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= @@ -217,8 +217,8 @@ cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHD cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= -cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= -cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg= +cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8= +cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q= cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= @@ -228,8 +228,8 @@ cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0 cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= -cloud.google.com/go/kms v1.18.4 h1:dYN3OCsQ6wJLLtOnI8DGUwQ5shMusXsWCCC+s09ATsk= -cloud.google.com/go/kms v1.18.4/go.mod h1:SG1bgQ3UWW6/KdPo9uuJnzELXY5YTTMJtDYvajiQ22g= +cloud.google.com/go/kms v1.19.0 h1:x0OVJDl6UH1BSX4THKlMfdcFWoE4ruh90ZHuilZekrU= +cloud.google.com/go/kms v1.19.0/go.mod h1:e4imokuPJUc17Trz2s6lEXFDt8bgDmvpVynH39bdrHM= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= 
cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= @@ -239,8 +239,8 @@ cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6 cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/longrunning v0.5.11 h1:Havn1kGjz3whCfoD8dxMLP73Ph5w+ODyZB9RUsDxtGk= -cloud.google.com/go/longrunning v0.5.11/go.mod h1:rDn7//lmlfWV1Dx6IB4RatCPenTwwmqXuiP0/RgoEO4= +cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI= +cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts= cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= @@ -328,15 +328,15 @@ cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJe cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= -cloud.google.com/go/secretmanager v1.13.5 h1:tXlHvpm97mFD0Lv50N4U4zlXfkoTNay3BmpNA/W7/oI= -cloud.google.com/go/secretmanager v1.13.5/go.mod h1:/OeZ88l5Z6nBVilV0SXgv6XJ243KP2aIhSWRMrbvDCQ= +cloud.google.com/go/secretmanager v1.14.0 h1:P2RRu2NEsQyOjplhUPvWKqzDXUKzwejHLuSUBHI8c4w= +cloud.google.com/go/secretmanager v1.14.0/go.mod h1:q0hSFHzoW7eRgyYFH8trqEFavgrMeiJI4FETNN78vhM= cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= 
cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= -cloud.google.com/go/security v1.17.4 h1:ERhxAa02mnMEIIAXvzje+qJ+yWniP6l5uOX+k9ELCaA= -cloud.google.com/go/security v1.17.4/go.mod h1:KMuDJH+sEB3KTODd/tLJ7kZK+u2PQt+Cfu0oAxzIhgo= +cloud.google.com/go/security v1.18.0 h1:CjBd67GVb+Oenjt4VsUw0RUQktSIgexTJN3UQta7XRE= +cloud.google.com/go/security v1.18.0/go.mod h1:oS/kRVUNmkwEqzCgSmK2EaGd8SbDUvliEiADjSb/8Mo= cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= @@ -409,10 +409,10 @@ cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1V cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= -cuelabs.dev/go/oci/ociregistry v0.0.0-20240314152124-224736b49f2e h1:GwCVItFUPxwdsEYnlUcJ6PJxOjTeFFCKOh6QWg4oAzQ= -cuelabs.dev/go/oci/ociregistry v0.0.0-20240314152124-224736b49f2e/go.mod h1:ApHceQLLwcOkCEXM1+DyCXTHEJhNGDpJ2kmV6axsx24= -cuelang.org/go v0.8.1 h1:VFYsxIFSPY5KgSaH1jQ2GxHOrbu6Ga3kEI70yCZwnOg= -cuelang.org/go v0.8.1/go.mod h1:CoDbYolfMms4BhWUlhD+t5ORnihR7wvjcfgyO9lL5FI= +cuelabs.dev/go/oci/ociregistry v0.0.0-20240404174027-a39bec0462d2 h1:BnG6pr9TTr6CYlrJznYUDj6V7xldD1W+1iXPum0wT/w= +cuelabs.dev/go/oci/ociregistry v0.0.0-20240404174027-a39bec0462d2/go.mod 
h1:pK23AUVXuNzzTpfMCA06sxZGeVQ/75FdVtW249de9Uo= +cuelang.org/go v0.9.2 h1:pfNiry2PdRBr02G/aKm5k2vhzmqbAOoaB4WurmEbWvs= +cuelang.org/go v0.9.2/go.mod h1:qpAYsLOf7gTM1YdEg6cxh553uZ4q9ZDWlPbtZr9q1Wk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= @@ -422,8 +422,8 @@ github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0/go.mod h1:GgeIE+1be8Ivm7Sh4RgwI42aTtC9qrcj+Y9Y6CjJhJs= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= @@ -452,23 +452,16 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible 
h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= -github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= -github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= github.com/Azure/go-autorest/logger v0.2.1 
h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= @@ -479,8 +472,10 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/GoogleCloudPlatform/cloudsql-proxy v1.36.0 h1:kAtNAWwvTt5+iew6baV0kbOrtjYTXPtWNSyOFlcxkBU= -github.com/GoogleCloudPlatform/cloudsql-proxy v1.36.0/go.mod h1:VRKXU8C7Y/aUKjRBTGfw0Ndv4YqNxlB8zAPJJDxbASE= +github.com/GoogleCloudPlatform/cloudsql-proxy v1.37.0 h1:gl5KGBBLKXc4BVKkyOJW9w6B890gUkoDkG/pYkTTQHE= +github.com/GoogleCloudPlatform/cloudsql-proxy v1.37.0/go.mod h1:43xFPKNglOf/bHDR1DbcGTp4Erza2TiUJaU+X9L+1AI= +github.com/Keyfactor/ejbca-go-client-sdk v1.0.2 h1:pPnXCFfIFAwCjJrg1BtYlzoF8oHQ52sPOMs/uZ9uvZA= +github.com/Keyfactor/ejbca-go-client-sdk v1.0.2/go.mod h1:4Sv/KGVgRV4VXKko1ajfTaJwqJ5Aiw0VrDI9S7IcQ1g= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= @@ -510,46 +505,26 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= 
-github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.2/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 h1:iC9YFYKDGEy3n/FtqJnOkZsene9olVspKmkX5A2YBEo= github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= github.com/alibabacloud-go/cr-20160607 v1.0.1 h1:WEnP1iPFKJU74ryUKh/YDPHoxMZawqlPajOymyNAkts= github.com/alibabacloud-go/cr-20160607 v1.0.1/go.mod h1:QHeKZtZ3F3FOE+/uIXCBAp8POwnUYekpLwr1dtQa5r0= github.com/alibabacloud-go/cr-20181201 v1.0.10 h1:B60f6S1imsgn2fgC6X6FrVNrONDrbCT0NwYhsJ0C9/c= github.com/alibabacloud-go/cr-20181201 v1.0.10/go.mod h1:VN9orB/w5G20FjytoSpZROqu9ZqxwycASmGqYUJSoDc= -github.com/alibabacloud-go/darabonba-openapi v0.1.12/go.mod h1:sTAjsFJmVsmcVeklL9d9uDBlFsgl43wZ6jhI6BHqHqU= -github.com/alibabacloud-go/darabonba-openapi v0.1.14/go.mod h1:w4CosR7O/kapCtEEMBm3JsQqWBU/CnZ2o0pHorsTWDI= github.com/alibabacloud-go/darabonba-openapi v0.2.1 h1:WyzxxKvhdVDlwpAMOHgAiCJ+NXa6g5ZWPFEzaK/ewwY= github.com/alibabacloud-go/darabonba-openapi v0.2.1/go.mod h1:zXOqLbpIqq543oioL9IuuZYOQgHQ5B8/n5OPrnko8aY= -github.com/alibabacloud-go/darabonba-string v1.0.0/go.mod h1:93cTfV3vuPhhEwGGpKKqhVW4jLe7tDpo3LUM0i0g6mA= -github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68/go.mod h1:6pb/Qy8c+lqua8cFpEy7g39NRRqOWc3rOwAy8m5Y2BY= github.com/alibabacloud-go/debug v1.0.0 h1:3eIEQWfay1fB24PQIEzXAswlVJtdQok8f3EVN5VrBnA= github.com/alibabacloud-go/debug v1.0.0/go.mod h1:8gfgZCCAC3+SCzjWtY053FrOcd4/qlH6IHTI4QyICOc= -github.com/alibabacloud-go/endpoint-util v1.1.0/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE= github.com/alibabacloud-go/endpoint-util v1.1.1 h1:ZkBv2/jnghxtU0p+upSU0GGzW1VL9GQdZO3mcSUTUy8= github.com/alibabacloud-go/endpoint-util v1.1.1/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE= -github.com/alibabacloud-go/openapi-util v0.0.9/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= 
-github.com/alibabacloud-go/openapi-util v0.0.10/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= -github.com/alibabacloud-go/openapi-util v0.0.11/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= github.com/alibabacloud-go/openapi-util v0.1.0 h1:0z75cIULkDrdEhkLWgi9tnLe+KhAFE/r5Pb3312/eAY= github.com/alibabacloud-go/openapi-util v0.1.0/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= -github.com/alibabacloud-go/tea v1.1.0/go.mod h1:IkGyUSX4Ba1V+k4pCtJUc6jDpZLFph9QMy2VUPTwukg= -github.com/alibabacloud-go/tea v1.1.7/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= -github.com/alibabacloud-go/tea v1.1.8/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= -github.com/alibabacloud-go/tea v1.1.11/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= -github.com/alibabacloud-go/tea v1.1.17/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A= -github.com/alibabacloud-go/tea v1.1.19/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A= github.com/alibabacloud-go/tea v1.2.1 h1:rFF1LnrAdhaiPmKwH5xwYOKlMh66CqRwPUTzIK74ask= github.com/alibabacloud-go/tea v1.2.1/go.mod h1:qbzof29bM/IFhLMtJPrgTGK3eauV5J2wSyEUo4OEmnA= -github.com/alibabacloud-go/tea-utils v1.3.1/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE= -github.com/alibabacloud-go/tea-utils v1.3.9/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE= -github.com/alibabacloud-go/tea-utils v1.4.3/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw= github.com/alibabacloud-go/tea-utils v1.4.5 h1:h0/6Xd2f3bPE4XHTvkpjwxowIwRCJAJOqY6Eq8f3zfA= github.com/alibabacloud-go/tea-utils v1.4.5/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw= -github.com/alibabacloud-go/tea-xml v1.1.2/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8= github.com/alibabacloud-go/tea-xml v1.1.3 h1:7LYnm+JbOq2B+T/B0fHC4Ies4/FofC4zHzYtqw7dgt0= github.com/alibabacloud-go/tea-xml v1.1.3/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8= -github.com/aliyun/credentials-go v1.1.2/go.mod 
h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw= github.com/aliyun/credentials-go v1.3.1 h1:uq/0v7kWrxmoLGpqjx7vtQ/s03f0zR//0br/xWDTE28= github.com/aliyun/credentials-go v1.3.1/go.mod h1:8jKYhQuDawt8x2+fusqa1Y6mPxemTsBEN04dgcAcYz0= github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 h1:MzBOUgng9orim59UnfUTLRjMpd09C5uEVQ6RPGeCaVI= @@ -563,64 +538,64 @@ github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.51.6 h1:Ld36dn9r7P9IjU8WZSaswQ8Y/XUCRpewim5980DwYiU= -github.com/aws/aws-sdk-go v1.51.6/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY= -github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 h1:tW1/Rkad38LA15X4UQtjXZXNKsCgkshC3EbmcUmghTg= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3/go.mod h1:UbnqO+zjqk3uIt9yCACHJ9IVNhyhOCnYk8yA19SAWrM= -github.com/aws/aws-sdk-go-v2/config v1.27.18 h1:wFvAnwOKKe7QAyIxziwSKjmer9JBMH1vzIL6W+fYuKk= -github.com/aws/aws-sdk-go-v2/config v1.27.18/go.mod h1:0xz6cgdX55+kmppvPm2IaKzIXOheGJhAufacPJaXZ7c= -github.com/aws/aws-sdk-go-v2/credentials v1.17.18 h1:D/ALDWqK4JdY3OFgA2thcPO1c9aYTT5STS/CvnkqY1c= -github.com/aws/aws-sdk-go-v2/credentials v1.17.18/go.mod h1:JuitCWq+F5QGUrmMPsk945rop6bB57jdscu+Glozdnc= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5 h1:dDgptDO9dxeFkXy+tEgVkzSClHZje/6JkPW5aZyEvrQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5/go.mod h1:gjvE2KBUgUQhcv89jqxrIxH9GaKs1JbZzWejj/DaHGA= +github.com/aws/aws-sdk-go 
v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDagTk8= +github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= +github.com/aws/aws-sdk-go-v2/config v1.27.27 h1:HdqgGt1OAP0HkEDDShEl0oSYa9ZZBSOmKpdpsDMdO90= +github.com/aws/aws-sdk-go-v2/config v1.27.27/go.mod h1:MVYamCg76dFNINkZFu4n4RjDixhVr51HLj4ErWzrVwg= +github.com/aws/aws-sdk-go-v2/credentials v1.17.27 h1:2raNba6gr2IfA0eqqiP2XiQ0UVOpGPgDSi0I9iAP+UI= +github.com/aws/aws-sdk-go-v2/credentials v1.17.27/go.mod h1:gniiwbGahQByxan6YjQUMcW4Aov6bLC3m+evgcoN4r4= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU= github.com/aws/aws-sdk-go-v2/feature/rds/auth v1.4.2 h1:TFju6ZoqO3TnX0C42VmYW4TxNcUFfbV/3cnaOxbcc5Y= github.com/aws/aws-sdk-go-v2/feature/rds/auth v1.4.2/go.mod h1:HLaNMGEhcO6GnJtrozRtluhCVM5/B/ZV5XHQ477uIgA= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY= +github.com/aws/aws-sdk-go-v2/internal/configsources 
v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 h1:jYfy8UPmd+6kJW5YhY0L1/KftReOGxI/4NtVSTh9O/I= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16/go.mod h1:7ZfEPZxkW42Afq4uQB8H2E2e6ebh6mXTueEpYzjCzcs= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16 h1:mimdLQkIX1zr8GIPY1ZtALdBQGxcASiBd2MOp8m/dMc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.16/go.mod h1:YHk6owoSwrIsok+cAH9PENCOGoH5PU2EllX4vLtSrsY= github.com/aws/aws-sdk-go-v2/service/acmpca v1.35.0 h1:GZ7eaCsYZar0pOQPzBJeP8ImFEzDpPFbJ52JCiF9HQ4= github.com/aws/aws-sdk-go-v2/service/acmpca v1.35.0/go.mod h1:vDUysl9ROGF6GAsl1OgTg6xHDnw391hCc5+IYg2U/GQ= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.173.0 h1:ta62lid9JkIpKZtZZXSj6rP2AqY5x1qYGq53ffxqD9Q= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.173.0/go.mod h1:o6QDjdVKpP5EF0dp/VlvqckzuSDATr1rLdHt3A5m0YY= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.177.0 h1:LAdDRIj5BEZM9fLDTUWUyPzWvv5A++nCEps/RGmZNOo= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.177.0/go.mod h1:ISODge3zgdwOEa4Ou6WM9PKbxJWJ15DYKnr2bfmCAIA= github.com/aws/aws-sdk-go-v2/service/ecr v1.24.7 h1:3iaT/LnGV6jNtbBkvHZDlzz7Ky3wMHDJAyFtGd5GUJI= github.com/aws/aws-sdk-go-v2/service/ecr v1.24.7/go.mod h1:mtzCLxk6M+KZbkJdq3cUH9GCrudw8qCy5C3EHO+5vLc= github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.21.6 h1:h+r5/diSwztgKgxUrntt6AOI5lBYY0ZJv+yzeulGZSU= github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.21.6/go.mod h1:7+5MHFC52LC85xKCjCuWDHmIncOOvWnll10OT9EAN/g= -github.com/aws/aws-sdk-go-v2/service/iam v1.34.1 
h1:BzAfH/XAECH4P7toscHvBbyw9zuaEMT8gzEo40BaLDs= -github.com/aws/aws-sdk-go-v2/service/iam v1.34.1/go.mod h1:gCfCySFdW8/FaTC6jzPwmML5bOUGty9Eq/+SU2PFv0M= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 h1:YPYe6ZmvUfDDDELqEKtAd6bo8zxhkm+XEFEzQisqUIE= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17/go.mod h1:oBtcnYua/CgzCWYN7NZ5j7PotFDaFSUjCYVTtfyn7vw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 h1:HGErhhrxZlQ044RiM+WdoZxp0p+EGM62y3L6pwA4olE= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 h1:246A4lSTXWJw/rmlQI+TT2OcqeDMKBdyjEQrafMaQdA= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15/go.mod h1:haVfg3761/WF7YPuJOER2MP0k4UAXyHaLclKXB6usDg= -github.com/aws/aws-sdk-go-v2/service/kms v1.35.1 h1:0gP2OJJT6HM2BYltZ9x+A87OE8LJL96DXeAAdLv3t1M= -github.com/aws/aws-sdk-go-v2/service/kms v1.35.1/go.mod h1:hGONorZkQCfR5DW6l2xdy7zC8vfO0r9pJlwyg6gmGeo= -github.com/aws/aws-sdk-go-v2/service/organizations v1.30.2 h1:+tGF0JH2u4HwneqNFAKFHqENwfpBweKj67+LbwTKpqE= -github.com/aws/aws-sdk-go-v2/service/organizations v1.30.2/go.mod h1:6wxO8s5wMumyNRsOgOgcIvqvF8rIf8Cj7Khhn/bFI0c= +github.com/aws/aws-sdk-go-v2/service/iam v1.35.0 h1:xIjTizH74aMNQBjp9D5cvjRZmOYtnrpjOGU3xkVqrjk= +github.com/aws/aws-sdk-go-v2/service/iam v1.35.0/go.mod h1:IdHqqRLKgxYR4IY7Omd7SuV4SJzJ8seF+U5PW+mvtP4= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc= 
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18 h1:GckUnpm4EJOAio1c8o25a+b3lVfwVzC9gnSBqiiNmZM= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18/go.mod h1:Br6+bxfG33Dk3ynmkhsW2Z/t9D4+lRqdLDNCKi85w0U= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 h1:tJ5RnkHCiSH0jyd6gROjlJtNwov0eGYNz8s8nFcR0jQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 h1:jg16PhLPUiHIj8zYIW6bqzeQSuHVEiWnGA0Brz5Xv2I= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY= +github.com/aws/aws-sdk-go-v2/service/kms v1.35.3 h1:UPTdlTOwWUX49fVi7cymEN6hDqCwe3LNv1vi7TXUutk= +github.com/aws/aws-sdk-go-v2/service/kms v1.35.3/go.mod h1:gjDP16zn+WWalyaUqwCCioQ8gU8lzttCCc9jYsiQI/8= +github.com/aws/aws-sdk-go-v2/service/organizations v1.31.0 h1:D+q5pWmlcuqISBcLIeeYFukvl33JgQr/1lfbQnrIvVk= +github.com/aws/aws-sdk-go-v2/service/organizations v1.31.0/go.mod h1:qdJX3WZbuAan5dXCoinnJjuY1QERCpv3glXeI3+wbeA= github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.14.0 h1:LoDKjG6X8Hj/Kiqmgpu/jW52GDTeToC6BehMbgHsZkg= github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.14.0/go.mod h1:7IIMPfX6TzfxRIJIp1NLYWFkApDOMnlb5XrynzpxMkA= -github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 h1:sZXIzO38GZOU+O0C+INqbH7C2yALwfMWpd64tONS/NE= -github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2/go.mod h1:Lcxzg5rojyVPU/0eFwLtcyTaek/6Mtic5B1gJo7e/zE= +github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0 h1:Wb544Wh+xfSXqJ/j3R4aX9wrKUoZsJNmilBYZb3mKQ4= +github.com/aws/aws-sdk-go-v2/service/s3 v1.61.0/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.1 h1:ZoYRD8IJqPkzjBnpokiMNO6L/DQprtpVpD6k0YSaF5U= github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.1/go.mod h1:GlRarZzIMl9VDi0mLQt+qQOuEkVFPnTkkjyugV1uVa8= 
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.11 h1:gEYM2GSpr4YNWc6hCd5nod4+d4kd9vWIAWrmGuLdlMw= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.11/go.mod h1:gVvwPdPNYehHSP9Rs7q27U1EU+3Or2ZpXvzAYJNh63w= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5 h1:iXjh3uaH3vsVcnyZX7MqCoCfcyxIrVE9iOQruRaWPrQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5/go.mod h1:5ZXesEuy/QcO0WUnt+4sDkxhdXRHTu2yG0uCSH8B6os= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.1 h1:+woJ607dllHJQtsnJLi52ycuqHMwlW+Wqm2Ppsfp4nQ= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.1/go.mod h1:jiNR3JqT15Dm+QWq2SRgh0x0bCNSRP2L25+CqPNpJlQ= -github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE= -github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 h1:BXx0ZIxvrJdSgSvKTZ+yRBeSqqgPM89VPlulEcl37tM= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.4/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ= +github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= +github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 h1:SoFYaT9UyGkR0+nogNyD/Lj+bsixB+SNuAS4ABlEs6M= github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8/go.mod h1:2JF49jcDOrLStIXN/j/K1EKRq8a8R2qRnlZA6/o/c7c= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -633,20 +608,19 
@@ github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdn github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY= -github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE= -github.com/buildkite/agent/v3 v3.62.0 h1:yvzSjI8Lgifw883I8m9u8/L/Thxt4cLFd5aWPn3gg70= -github.com/buildkite/agent/v3 v3.62.0/go.mod h1:jN6SokGXrVNNIpI0BGQ+j5aWeI3gin8F+3zwA5Q6gqM= -github.com/buildkite/go-pipeline v0.3.2 h1:SW4EaXNwfjow7xDRPGgX0Rcx+dPj5C1kV9LKCLjWGtM= -github.com/buildkite/go-pipeline v0.3.2/go.mod h1:iY5jzs3Afc8yHg6KDUcu3EJVkfaUkd9x/v/OH98qyUA= -github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251 h1:k6UDF1uPYOs0iy1HPeotNa155qXRWrzKnqAaGXHLZCE= -github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251/go.mod h1:gbPR1gPu9dB96mucYIR7T3B7p/78hRVSOuzIWLHK2Y4= -github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/bufbuild/protocompile v0.10.0 h1:+jW/wnLMLxaCEG8AX9lD0bQ5v9h1RUiMKOBOT5ll9dM= +github.com/bufbuild/protocompile v0.10.0/go.mod h1:G9qQIQo0xZ6Uyj6CMNz0saGmx2so+KONo8/KrELABiY= +github.com/buildkite/agent/v3 v3.76.2 h1:SweFq3e0N20RikWsVeOXzTjfr0AoOskxm9c0bcNyI0E= +github.com/buildkite/agent/v3 v3.76.2/go.mod h1:9ffbmJD7d7C/nOcElj6Qm+uIj1QoYh3NNvka4rkKkss= +github.com/buildkite/go-pipeline v0.10.0 h1:EDffu+LfMY2k5u+iEdo6Jn3obGKsrL5wicc1O/yFeRs= +github.com/buildkite/go-pipeline v0.10.0/go.mod h1:eMH1kiav5VeiTiu0Mk2/M7nZhKyFeL4iGj7Y7rj4f3w= +github.com/buildkite/interpolate v0.1.3 h1:OFEhqji1rNTRg0u9DsSodg63sjJQEb1uWbENq9fUOBM= +github.com/buildkite/interpolate v0.1.3/go.mod h1:UNVe6A+UfiBNKbhAySrBbZFZFxQ+DXr9nWen6WVt/A8= 
+github.com/buildkite/roko v1.2.0 h1:hbNURz//dQqNl6Eo9awjQOVOZwSDJ8VEbBDxSfT9rGQ= +github.com/buildkite/roko v1.2.0/go.mod h1:23R9e6nHxgedznkwwfmqZ6+0VJZJZ2Sg/uVcp2cP46I= github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA= github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q= github.com/cactus/go-statsd-client/v5 v5.0.0/go.mod h1:COEvJ1E+/E2L4q6QE5CkjWPi4eeDw9maJBMIuMPBZbY= -github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= -github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -665,11 +639,9 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/clbanning/mxj/v2 v2.5.5/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= github.com/clbanning/mxj/v2 v2.7.0 h1:WA/La7UGCanFe5NpHF0Q3DNtnCsVoxbPKuyBNHWRyME= github.com/clbanning/mxj/v2 v2.7.0/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod 
h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -698,6 +670,8 @@ github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSk github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI= github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc= github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= @@ -711,8 +685,6 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnN github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd h1:83Wprp6ROGeiHFAP8WJdI2RoxALQYgdllERc3N5N2DM= github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936 h1:foGzavPWwtoyBvjWyKJYDYsyzy+23iBV7NKTwdk+LRY= -github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936/go.mod h1:ttKPnOepYt4LLzD+loXQ1rT6EmpyIYHro7TAJuIIlHo= github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= 
github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= @@ -728,12 +700,12 @@ github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg= -github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v27.1.1+incompatible h1:goaZxOqs4QKxznZjjBWKONQci/MywhtRv2oNn0GkeZE= +github.com/docker/cli v27.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= -github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.2.1+incompatible h1:fQdiLfW7VLscyoeYEBz7/J8soYFDZV1u6VW6gJEjNMI= +github.com/docker/docker v27.2.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= @@ -756,8 +728,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= 
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= -github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= @@ -765,8 +737,6 @@ github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y= github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -788,6 +758,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify 
v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= @@ -838,25 +810,25 @@ github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3Bum github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-piv/piv-go v1.11.0 h1:5vAaCdRTFSIW4PeqMbnsDlUZ7odMYWnHBDGdmtU/Zhg= github.com/go-piv/piv-go v1.11.0/go.mod h1:NZ2zmjVkfFaL/CF8cVQ/pXdXtuj110zEKGdJM6fJZZM= -github.com/go-rod/rod v0.116.1 h1:BDMZY3qm/14SmvHBV7DoFUhXeJ2MbUYgumQ88b+v2WE= -github.com/go-rod/rod v0.116.1/go.mod h1:3Ash9fYwznqz9S1uLQgQRStur4fCXjoxxGW+ym6TYjU= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= 
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid/v5 v5.2.0 h1:qw1GMx6/y8vhVsx626ImfKMuS5CvJmhIKKtuyvfajMM= -github.com/gofrs/uuid/v5 v5.2.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= +github.com/gofrs/uuid/v5 v5.3.0 h1:m0mUMr+oVYUdxpMLgSYCZiXe7PuVPnI94+OMeVBNedk= +github.com/gofrs/uuid/v5 v5.3.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a h1:dR8+Q0uO5S2ZBcs2IH6VBKYwSxPo2vYCYq0ot0mu7xA= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -864,8 +836,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= 
github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= -github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= @@ -916,10 +887,10 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/certificate-transparency-go v1.1.8 h1:LGYKkgZF7satzgTak9R4yzfJXEeYVAjV6/EAEJOf1to= -github.com/google/certificate-transparency-go v1.1.8/go.mod h1:bV/o8r0TBKRf1X//iiiSgWrvII4d7/8OiA+3vG26gI8= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/certificate-transparency-go v1.2.1 h1:4iW/NwzqOqYEEoCBEFP+jPbBXbLqMpq3CifMyOnDUME= +github.com/google/certificate-transparency-go v1.2.1/go.mod h1:bvn/ytAccv+I6+DGkqpvSsEdiVGramgaSC6RD3tEmeE= github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg= github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= 
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= @@ -945,8 +916,8 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-configfs-tsm v0.2.2 h1:YnJ9rXIOj5BYD7/0DNnzs8AOp7UcvjfTvt215EWcs98= github.com/google/go-configfs-tsm v0.2.2/go.mod h1:EL1GTDFMb5PZQWDviGfZV9n87WeGTR/JUg13RfwkgRo= -github.com/google/go-containerregistry v0.20.1 h1:eTgx9QNYugV4DN5mz4U8hiAGTi1ybXn0TPi4Smd8du0= -github.com/google/go-containerregistry v0.20.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI= +github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo= +github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8= github.com/google/go-github/v55 v55.0.0 h1:4pp/1tNMB9X/LuAhs5i0KQAE40NmiR/y6prLNb9x9cg= github.com/google/go-github/v55 v55.0.0/go.mod h1:JLahOTA1DnXzhxEymmFF5PP2tSS9JVNj68mSZNDwskA= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= @@ -989,8 +960,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= -github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod 
h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= @@ -1009,8 +980,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -1025,8 +996,6 @@ github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDP github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod 
h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= @@ -1079,10 +1048,10 @@ github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/hashicorp/vault/api v1.14.0 h1:Ah3CFLixD5jmjusOgm8grfN9M0d+Y8fVR2SW0K6pJLU= -github.com/hashicorp/vault/api v1.14.0/go.mod h1:pV9YLxBGSz+cItFDd8Ii4G17waWOQ32zVjMWHe/cOqk= -github.com/hashicorp/vault/sdk v0.13.0 h1:UmcLF+7r70gy1igU44Suflgio30P2GOL4MkHPhJuiP8= -github.com/hashicorp/vault/sdk v0.13.0/go.mod h1:LxhNTWRG99mXg9xijBCnCnIus+brLC5uFsQUQ4zgOnU= +github.com/hashicorp/vault/api v1.15.0 h1:O24FYQCWwhwKnF7CuSqP30S51rTV7vz1iACXE/pj5DA= +github.com/hashicorp/vault/api v1.15.0/go.mod h1:+5YTO09JGn0u+b6ySD/LLVf8WkJCPLAL2Vkmrn2+CM8= +github.com/hashicorp/vault/sdk v0.14.0 h1:8vagjlpLurkFTnKT9aFSGs4U1XnK2IFytnWSxgFrDo0= +github.com/hashicorp/vault/sdk v0.14.0/go.mod h1:3hnGK5yjx3CW2hFyk+Dw1jDgKxdBvUvjyxMHhq0oUFc= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= @@ -1107,20 +1076,20 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod 
h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= -github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= -github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= -github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs= +github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= github.com/jellydator/ttlcache/v3 v3.2.0 h1:6lqVJ8X3ZaUwvzENqPAobDsXNExfUJd61u++uW8a3LE= github.com/jellydator/ttlcache/v3 v3.2.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= 
-github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls= -github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k= +github.com/jhump/protoreflect v1.16.0 h1:54fZg+49widqXYQ0b+usAFHbMkBGR4PpXrsHc8+TBDg= +github.com/jhump/protoreflect v1.16.0/go.mod h1:oYPd7nPvcBw/5wlDfm/AVmU9zH9BgqGCI469pGxfj/8= github.com/jinzhu/gorm v1.9.16 h1:+IyIjPEABKRpsu/F8OvDPy9fyQlgsg2luMV2ZIH5i5o= github.com/jinzhu/gorm v1.9.16/go.mod h1:G3LB3wezTOWM2ITLzPxEXgSkOXAntiLHS7UdBefADcs= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= @@ -1144,13 +1113,12 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= @@ -1204,12 +1172,11 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.23 h1:gbShiuAP1W5j9UOksQ06aiiqPMxYecovVGwmTxWtuw0= +github.com/mattn/go-sqlite3 v1.14.23/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= -github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mitchellh/cli v1.1.5 h1:OxRIeJXpAMztws/XHlN2vu6imG5Dpq+j61AzAX5fLng= @@ -1231,8 +1198,8 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/sys/mountinfo v0.6.2 
h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= -github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= +github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1250,7 +1217,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE= github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -1270,28 +1236,32 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= -github.com/onsi/ginkgo/v2 v2.17.1/go.mod 
h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= -github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= -github.com/open-policy-agent/opa v0.67.0 h1:FOdsO9yNhfmrh+72oVK7ImWmzruG+VSpfbr5IBqEWVs= -github.com/open-policy-agent/opa v0.67.0/go.mod h1:aqKlHc8E2VAAylYE9x09zJYr/fYzGX+JKne89UGqFzk= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/open-policy-agent/opa v0.68.0 h1:Jl3U2vXRjwk7JrHmS19U3HZO5qxQRinQbJ2eCJYSqJQ= +github.com/open-policy-agent/opa v0.68.0/go.mod h1:5E5SvaPwTpwt2WM177I9Z3eT7qUpmOGjk1ZdHs+TZ4w= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/runc v1.1.14 h1:rgSuzbmgz5DUJjeSnw337TxDbRuqjs6iqQck/2weR6w= +github.com/opencontainers/runc v1.1.14/go.mod h1:E4C2z+7BxR7GHXp0hAY53mek+x49X1LjPNeMTfRGvOA= +github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= +github.com/opencontainers/runtime-spec v1.1.0/go.mod 
h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= -github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1300,6 +1270,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -1313,8 +1285,8 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4= +github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1325,15 +1297,15 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.51.1 h1:eIjN50Bwglz6a/c3hAgSMcofL3nD+nFQkV6Dd4DsQCw= -github.com/prometheus/common v0.51.1/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= +github.com/prometheus/common v0.55.0 
h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf h1:014O62zIzQwvoD7Ekj3ePDF5bv9Xxy0w6AZk0qYbjUk= github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf/go.mod h1:jgxiZysxFPM+iWKwQwPR+y+Jvo54ARd4EisXxKYpB5c= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= @@ -1369,22 +1341,26 @@ github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/sigstore/cosign/v2 v2.2.4 h1:iY4vtEacmu2hkNj1Fh+8EBqBwKs2DHM27/lbNWDFJro= -github.com/sigstore/cosign/v2 v2.2.4/go.mod h1:JZlRD2uaEjVAvZ1XJ3QkkZJhTqSDVtLaet+C/TMR81Y= -github.com/sigstore/fulcio v1.4.5 h1:WWNnrOknD0DbruuZWCbN+86WRROpEl3Xts+WT2Ek1yc= -github.com/sigstore/fulcio 
v1.4.5/go.mod h1:oz3Qwlma8dWcSS/IENR/6SjbW4ipN0cxpRVfgdsjMU8= +github.com/sigstore/cosign/v2 v2.4.0 h1:2NdidNgClg+oXr/fDIr37E/BE6j00gqgUhSiBK2kjSQ= +github.com/sigstore/cosign/v2 v2.4.0/go.mod h1:j+fH1DCUkcn92qp6ezDj4JbGMri6eG1nLJC+hs64rvc= +github.com/sigstore/fulcio v1.5.1 h1:Iasy1zfNjaq8BV4S8o6pXspLDU28PQC2z07GmOu9zpM= +github.com/sigstore/fulcio v1.5.1/go.mod h1:W1A/UHrTopy1IBZPMtHmxg7GPYAu+vt5dRXM3W6yjPo= +github.com/sigstore/protobuf-specs v0.3.2 h1:nCVARCN+fHjlNCk3ThNXwrZRqIommIeNKWwQvORuRQo= +github.com/sigstore/protobuf-specs v0.3.2/go.mod h1:RZ0uOdJR4OB3tLQeAyWoJFbNCBFrPQdcokntde4zRBA= github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= -github.com/sigstore/sigstore v1.8.7 h1:L7/zKauHTg0d0Hukx7qlR4nifh6T6O6UIt9JBwAmTIg= -github.com/sigstore/sigstore v1.8.7/go.mod h1:MPiQ/NIV034Fc3Kk2IX9/XmBQdK60wfmpvgK9Z1UjRA= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.3 h1:LTfPadUAo+PDRUbbdqbeSl2OuoFQwUFTnJ4stu+nwWw= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.3/go.mod h1:QV/Lxlxm0POyhfyBtIbTWxNeF18clMlkkyL9mu45y18= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.3 h1:xgbPRCr2npmmsuVVteJqi/ERw9+I13Wou7kq0Yk4D8g= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.3/go.mod h1:G4+I83FILPX6MtnoaUdmv/bRGEVtR3JdLeJa/kXdk/0= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.3 h1:vDl2fqPT0h3D/k6NZPlqnKFd1tz3335wm39qjvpZNJc= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.3/go.mod h1:9uOJXbXEXj+M6QjMKH5PaL5WDMu43rHfbIMgXzA8eKI= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.3 h1:h9G8j+Ds21zqqulDbA/R/ft64oQQIyp8S7wJYABYSlg= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.3/go.mod h1:zgCeHOuqF6k7A7TTEvftcA9V3FRzB7mrPtHOhXAQBnc= +github.com/sigstore/sigstore v1.8.9 h1:NiUZIVWywgYuVTxXmRoTT4O4QAGiTEKup4N1wdxFadk= +github.com/sigstore/sigstore v1.8.9/go.mod 
h1:d9ZAbNDs8JJfxJrYmulaTazU3Pwr8uLL9+mii4BNR3w= +github.com/sigstore/sigstore-go v0.5.1 h1:5IhKvtjlQBeLnjKkzMELNG4tIBf+xXQkDzhLV77+/8Y= +github.com/sigstore/sigstore-go v0.5.1/go.mod h1:TuOfV7THHqiDaUHuJ5+QN23RP/YoKmsbwJpY+aaYPN0= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.8 h1:2zHmUvaYCwV6LVeTo+OAkTm8ykOGzA9uFlAjwDPAUWM= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.8/go.mod h1:OEhheBplZinUsm7W9BupafztVZV3ldkAxEHbpAeC0Pk= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.8 h1:RKk4Z+qMaLORUdT7zntwMqKiYAej1VQlCswg0S7xNSY= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.8/go.mod h1:dMJdlBWKHMu2xf0wIKpbo7+QfG+RzVkBB3nHP8EMM5o= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.8 h1:89Xtxj8oqZt3UlSpCP4wApFvnQ2Z/dgowW5QOVhQigI= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.8/go.mod h1:Wa4xn/H3pU/yW/6tHiMXTpObBtBSGC5q29KYFEPKN6o= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.8 h1:Zte3Oogkd8m+nu2oK3yHtGmN++TZWh2Lm6q2iSprT1M= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.8/go.mod h1:j00crVw6ki4/WViXflw0zWgNALrAzZT+GbIK8v7Xlz4= github.com/sigstore/timestamp-authority v1.2.2 h1:X4qyutnCQqJ0apMewFyx+3t7Tws00JQ/JonBiu3QvLE= github.com/sigstore/timestamp-authority v1.2.2/go.mod h1:nEah4Eq4wpliDjlY342rXclGSO7Kb9hoRrl9tqLW13A= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -1395,11 +1371,6 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= -github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262 h1:unQFBIznI+VYD1/1fApl1A+9VcBk+9dcqGfnePY87LY= -github.com/smallstep/assert 
v0.0.0-20200723003110-82e2b9b3b262/go.mod h1:MyOHs9Po2fbM1LHej6sBUT8ozbxmMOFG+E+rx/GSGuc= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -1415,18 +1386,17 @@ github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/spiffe/go-spiffe/v2 v2.1.6/go.mod h1:eVDqm9xFvyqao6C+eQensb9ZPkyNEeaUbqbBpOhBnNk= github.com/spiffe/go-spiffe/v2 v2.3.0 h1:g2jYNb/PDMB8I7mBGL2Zuq/Ur6hUhoroxGQFyD6tTj8= github.com/spiffe/go-spiffe/v2 v2.3.0/go.mod h1:Oxsaio7DBgSNqhAO9i/9tLClaVlfRok7zvJnTV8ZyIY= -github.com/spiffe/spire-api-sdk v1.2.5-0.20240627195926-b5ac064f580b h1:k7ei1fQyt6+FbqDEAd90xaXLg52YuXueM+BRcoHZvEU= -github.com/spiffe/spire-api-sdk v1.2.5-0.20240627195926-b5ac064f580b/go.mod h1:4uuhFlN6KBWjACRP3xXwrOTNnvaLp1zJs8Lribtr4fI= +github.com/spiffe/spire-api-sdk v1.2.5-0.20240807182354-18e423ce2c1c 
h1:lK/B2paDUiqbngUGsLxDBmNX/BsG2yKxS8W/iGT+x2c= +github.com/spiffe/spire-api-sdk v1.2.5-0.20240807182354-18e423ce2c1c/go.mod h1:4uuhFlN6KBWjACRP3xXwrOTNnvaLp1zJs8Lribtr4fI= github.com/spiffe/spire-plugin-sdk v1.4.4-0.20230721151831-bf67dde4721d h1:LCRQGU6vOqKLfRrG+GJQrwMwDILcAddAEIf4/1PaSVc= github.com/spiffe/spire-plugin-sdk v1.4.4-0.20230721151831-bf67dde4721d/go.mod h1:GA6o2PVLwyJdevT6KKt5ZXCY/ziAPna13y/seGk49Ik= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= @@ -1455,9 +1425,10 @@ github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gt github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= +github.com/theupdateframework/go-tuf/v2 v2.0.0 h1:rD8d9RotYBprZVgC+9oyTZ5MmawepnTSTqoDuxjWgbs= +github.com/theupdateframework/go-tuf/v2 v2.0.0/go.mod h1:baB22nBHeHBCeuGZcIlctNq4P61PcOdyARlplg5xmLA= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= -github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w= github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho= github.com/tjfoc/gmsm v1.4.1/go.mod 
h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= @@ -1476,27 +1447,18 @@ github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXV github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= -github.com/xanzy/go-gitlab v0.102.0 h1:ExHuJ1OTQ2yt25zBMMj0G96ChBirGYv8U7HyUiYkZ+4= -github.com/xanzy/go-gitlab v0.102.0/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xanzy/go-gitlab v0.107.0 h1:P2CT9Uy9yN9lJo3FLxpMZ4xj6uWcpnigXsjvqJ6nd2Y= +github.com/xanzy/go-gitlab v0.107.0/go.mod h1:wKNKh3GkYDMOsGmnfuX+ITCmDuSDWFO0G+C4AygL9RY= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg= github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= -github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= -github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= -github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= -github.com/ysmood/goob v0.4.0/go.mod 
h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= -github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q= -github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg= -github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= -github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= -github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak= -github.com/ysmood/leakless v0.8.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -1518,30 +1480,30 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 
h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= 
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.step.sm/crypto v0.44.2 h1:t3p3uQ7raP2jp2ha9P6xkQF85TJZh+87xmjSLaib+jk= -go.step.sm/crypto v0.44.2/go.mod h1:x1439EnFhadzhkuaGX7sz03LEMQ+jV4gRamf5LCZJQQ= +go.step.sm/crypto v0.51.1 h1:ktUg/2hetEMiBAqgz502ktZDGoDoGrcHFg3XpkmkvvA= +go.step.sm/crypto v0.51.1/go.mod h1:PdrhttNU/tG9/YsVd4fdlysBN+UV503p0o2irFZQlAw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -1561,25 +1523,17 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1621,8 +1575,8 @@ golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1655,7 +1609,6 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net 
v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -1668,7 +1621,6 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -1686,11 +1638,9 @@ golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 
v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1718,8 +1668,8 @@ golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1735,8 +1685,8 @@ golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1771,7 +1721,6 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1833,23 +1782,20 @@ golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1864,26 +1810,23 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod 
h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -1912,7 +1855,6 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1937,8 +1879,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= 
+golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2000,8 +1942,8 @@ google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7Twe google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= -google.golang.org/api v0.190.0 h1:ASM+IhLY1zljNdLu19W1jTmU6A+gMk6M46Wlur61s+Q= -google.golang.org/api v0.190.0/go.mod h1:QIr6I9iedBLnfqoD6L6Vze1UvS5Hzj5r2aUBOaZnLHo= +google.golang.org/api v0.197.0 h1:x6CwqQLsFiA5JKAiGyGBjc2bNtHtLddhJCE2IKuhhcQ= +google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2127,12 +2069,12 @@ google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf h1:OqdXDEakZCVtDiZTjcxfwbHPCT11ycCEsTKesBVKvyY= -google.golang.org/genproto 
v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:mCr1K1c8kX+1iSBREvU3Juo11CB+QOEWxbRS01wWl5M= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0= +google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -2174,8 +2116,8 @@ google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCD google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= -google.golang.org/grpc 
v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/examples v0.0.0-20230224211313-3775f633ce20/go.mod h1:Nr5H8+MlGWr5+xX/STzdoEqJrO+YteqFbMyCsrb6mH0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -2199,14 +2141,14 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 
v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -2233,33 +2175,33 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ= -k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04= -k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws= -k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4= -k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc= -k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= -k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= -k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-aggregator v0.30.3 h1:hy5zfQ7p6BuJgc/XtGp3GBh2MPfOj6b1n3raKKMHOQE= -k8s.io/kube-aggregator v0.30.3/go.mod h1:2SP0IckvQoOwwZN8lmtWUnTZTgIpwOWvidWtxyqLwuk= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= +k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery 
v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= +k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-aggregator v0.31.1 h1:vrYBTTs3xMrpiEsmBjsLETZE9uuX67oQ8B3i1BFfMPw= +k8s.io/kube-aggregator v0.31.1/go.mod h1:+aW4NX50uneozN+BtoCxI4g7ND922p8Wy3tWKFDiWVk= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/mount-utils v0.30.3 h1:8Z3wSW5+GSvGNtlDhtoZrBCKLMIf5z/9tf8pie+G06s= -k8s.io/mount-utils v0.30.3/go.mod h1:9sCVmwGLcV1MPvbZ+rToMDnl1QcGozy+jBPd0MsQLIo= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/mount-utils v0.31.1 h1:f8UrH9kRynljmdNGM6BaCvFUON5ZPKDgE+ltmYqI4wA= +k8s.io/mount-utils v0.31.1/go.mod h1:HV/VYBUGqYUj4vt82YltzpWvgv8FPg0G9ItyInT3NPU= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= -sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= +sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= 
+sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/release-utils v0.7.7 h1:JKDOvhCk6zW8ipEOkpTGDH/mW3TI+XqtPp16aaQ79FU= -sigs.k8s.io/release-utils v0.7.7/go.mod h1:iU7DGVNi3umZJ8q6aHyUFzsDUIaYwNnNKGHo3YE5E3s= +sigs.k8s.io/release-utils v0.8.4 h1:4QVr3UgbyY/d9p74LBhg0njSVQofUsAZqYOzVZBhdBw= +sigs.k8s.io/release-utils v0.8.4/go.mod h1:m1bHfscTemQp+z+pLCZnkXih9n0+WukIUU70n6nFnU0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/pkg/agent/agent.go b/pkg/agent/agent.go index 433bad44173..1c7dc9050c4 100644 --- a/pkg/agent/agent.go +++ b/pkg/agent/agent.go @@ -17,13 +17,13 @@ import ( node_attestor "github.com/spiffe/spire/pkg/agent/attestor/node" workload_attestor "github.com/spiffe/spire/pkg/agent/attestor/workload" "github.com/spiffe/spire/pkg/agent/catalog" - "github.com/spiffe/spire/pkg/agent/common/backoff" "github.com/spiffe/spire/pkg/agent/endpoints" "github.com/spiffe/spire/pkg/agent/manager" "github.com/spiffe/spire/pkg/agent/manager/storecache" "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor" "github.com/spiffe/spire/pkg/agent/storage" "github.com/spiffe/spire/pkg/agent/svid/store" + "github.com/spiffe/spire/pkg/common/backoff" "github.com/spiffe/spire/pkg/common/diskutil" "github.com/spiffe/spire/pkg/common/health" "github.com/spiffe/spire/pkg/common/nodeutil" @@ -44,8 +44,7 @@ const ( ) type Agent struct { - c *Config - sto storage.Storage + c *Config } // Run the agent @@ -57,8 +56,7 @@ func (a *Agent) Run(ctx context.Context) error { return err } - var err error - 
a.sto, err = storage.Open(a.c.DataDir) + sto, err := storage.Open(a.c.DataDir) if err != nil { return fmt.Errorf("failed to open storage: %w", err) } @@ -111,7 +109,7 @@ func (a *Agent) Run(ctx context.Context) error { ) for { - as, err = a.attest(ctx, a.sto, cat, metrics, nodeAttestor) + as, err = a.attest(ctx, sto, cat, metrics, nodeAttestor) if err == nil { break } @@ -138,7 +136,7 @@ func (a *Agent) Run(ctx context.Context) error { } } } else { - as, err = a.attest(ctx, a.sto, cat, metrics, nodeAttestor) + as, err = a.attest(ctx, sto, cat, metrics, nodeAttestor) if err != nil { return err } @@ -146,7 +144,7 @@ func (a *Agent) Run(ctx context.Context) error { svidStoreCache := a.newSVIDStoreCache() - manager, err := a.newManager(ctx, a.sto, cat, metrics, as, svidStoreCache, nodeAttestor) + manager, err := a.newManager(ctx, sto, cat, metrics, as, svidStoreCache, nodeAttestor) if err != nil { return err } @@ -391,14 +389,11 @@ func (a *Agent) waitForTestDial(ctx context.Context) error { // CheckHealth is used as a top-level health check for the agent. func (a *Agent) CheckHealth() health.State { - err := errors.Join( - a.checkWorkloadAPI(), - a.checkSVID(), - ) - - // Both liveness and readiness checks verify that: - // - the workload API endpoint is available - // - the agent has an SVID + err := a.checkWorkloadAPI() + + // Both liveness and readiness checks are done by + // agents ability to create new Workload API client + // for the X509SVID service. // TODO: Better live check for agent. 
return health.State{ Ready: err == nil, @@ -412,20 +407,6 @@ func (a *Agent) CheckHealth() health.State { } } -func (a *Agent) checkSVID() error { - if a.sto == nil { - return errors.New("storage not initialized") - } - svid, _, err := a.sto.LoadSVID() - if err != nil { - return fmt.Errorf("loading SVID: %w", err) - } - if svid == nil { - return errors.New("SVID is nil") - } - return nil -} - func (a *Agent) checkWorkloadAPI() error { clientOption, err := util.GetWorkloadAPIClientOption(a.c.BindAddress) if err != nil { diff --git a/pkg/agent/api/debug/v1/service.go b/pkg/agent/api/debug/v1/service.go index 16ae4feb4af..8ba5d376f28 100644 --- a/pkg/agent/api/debug/v1/service.go +++ b/pkg/agent/api/debug/v1/service.go @@ -95,10 +95,13 @@ func (s *Service) GetInfo(context.Context, *debugv1.GetInfoRequest) (*debugv1.Ge // Reset clock and set current response s.getInfoResp.ts = s.clock.Now() s.getInfoResp.resp = &debugv1.GetInfoResponse{ - SvidChain: svidChain, - Uptime: int32(s.uptime().Seconds()), - SvidsCount: int32(s.m.CountSVIDs()), - LastSyncSuccess: s.m.GetLastSync().UTC().Unix(), + SvidChain: svidChain, + Uptime: int32(s.uptime().Seconds()), + SvidsCount: int32(s.m.CountX509SVIDs()), + CachedX509SvidsCount: int32(s.m.CountX509SVIDs()), + CachedJwtSvidsCount: int32(s.m.CountJWTSVIDs()), + CachedSvidstoreX509SvidsCount: int32(s.m.CountSVIDStoreX509SVIDs()), + LastSyncSuccess: s.m.GetLastSync().UTC().Unix(), } } diff --git a/pkg/agent/api/debug/v1/service_test.go b/pkg/agent/api/debug/v1/service_test.go index 245801159b4..afeeced626a 100644 --- a/pkg/agent/api/debug/v1/service_test.go +++ b/pkg/agent/api/debug/v1/service_test.go @@ -99,45 +99,62 @@ func TestGetInfo(t *testing.T) { expectResp *debugv1.GetInfoResponse expectedLogs []spiretest.LogEntry // Time to add to clock.Mock - addToClk time.Duration - initCache bool - lastSync time.Time - svidCount int - svidState svid.State + addToClk time.Duration + initCache bool + lastSync time.Time + svidCount int + 
x509SvidCount int + jwtSvidCount int + svidstoreX509SvidCount int + svidState svid.State }{ { - name: "svid without intermediate", - lastSync: lastSync, - svidState: x509SVIDState, - svidCount: 123, + name: "svid without intermediate", + lastSync: lastSync, + svidState: x509SVIDState, + svidCount: 123, + x509SvidCount: 123, + jwtSvidCount: 123, + svidstoreX509SvidCount: 123, expectResp: &debugv1.GetInfoResponse{ - LastSyncSuccess: lastSync.UTC().Unix(), - SvidsCount: 123, - SvidChain: x509SVIDChain, + LastSyncSuccess: lastSync.UTC().Unix(), + SvidChain: x509SVIDChain, + SvidsCount: 123, + CachedX509SvidsCount: 123, + CachedJwtSvidsCount: 123, + CachedSvidstoreX509SvidsCount: 123, }, }, { - name: "svid with intermediate", - lastSync: lastSync, - svidState: stateWithIntermediate, - svidCount: 456, + name: "svid with intermediate", + lastSync: lastSync, + svidState: stateWithIntermediate, + svidCount: 456, + x509SvidCount: 456, + jwtSvidCount: 456, + svidstoreX509SvidCount: 456, expectResp: &debugv1.GetInfoResponse{ - LastSyncSuccess: lastSync.UTC().Unix(), - SvidsCount: 456, - SvidChain: svidWithIntermediateChain, + LastSyncSuccess: lastSync.UTC().Unix(), + SvidChain: svidWithIntermediateChain, + SvidsCount: 456, + CachedX509SvidsCount: 456, + CachedJwtSvidsCount: 456, + CachedSvidstoreX509SvidsCount: 456, }, }, { name: "get response from cache", expectResp: &debugv1.GetInfoResponse{ - LastSyncSuccess: cachedLastSync.Unix(), - SvidsCount: 99999, - SvidChain: x509SVIDChain, + LastSyncSuccess: cachedLastSync.Unix(), + SvidsCount: 99999, + CachedX509SvidsCount: 99999, + SvidChain: x509SVIDChain, }, - initCache: true, - lastSync: lastSync, - svidState: stateWithIntermediate, - svidCount: 456, + initCache: true, + lastSync: lastSync, + svidState: stateWithIntermediate, + svidCount: 253, + x509SvidCount: 253, }, { name: "expires cache", @@ -182,6 +199,7 @@ func TestGetInfo(t *testing.T) { // Set a success state before running actual test case and expire time if 
tt.initCache { test.m.svidCount = 99999 + test.m.x509SvidCount = 99999 test.m.svidState = x509SVIDState test.m.lastSync = cachedLastSync @@ -192,6 +210,9 @@ func TestGetInfo(t *testing.T) { test.clk.Add(tt.addToClk) test.m.svidCount = tt.svidCount + test.m.x509SvidCount = tt.x509SvidCount + test.m.jwtSvidCount = tt.jwtSvidCount + test.m.svidstoreX509SvidCount = tt.svidstoreX509SvidCount test.m.svidState = tt.svidState test.m.lastSync = tt.lastSync @@ -264,10 +285,13 @@ func setupServiceTest(t *testing.T) *serviceTest { type fakeManager struct { manager.Manager - bundle *cache.Bundle - svidState svid.State - svidCount int - lastSync time.Time + bundle *cache.Bundle + svidState svid.State + svidCount int + x509SvidCount int + jwtSvidCount int + svidstoreX509SvidCount int + lastSync time.Time } func (m *fakeManager) GetCurrentCredentials() svid.State { @@ -278,6 +302,18 @@ func (m *fakeManager) CountSVIDs() int { return m.svidCount } +func (m *fakeManager) CountX509SVIDs() int { + return m.x509SvidCount +} + +func (m *fakeManager) CountJWTSVIDs() int { + return m.jwtSvidCount +} + +func (m *fakeManager) CountSVIDStoreX509SVIDs() int { + return m.svidstoreX509SvidCount +} + func (m *fakeManager) GetLastSync() time.Time { return m.lastSync } diff --git a/pkg/agent/api/delegatedidentity/v1/service.go b/pkg/agent/api/delegatedidentity/v1/service.go index bec937487d7..073da398025 100644 --- a/pkg/agent/api/delegatedidentity/v1/service.go +++ b/pkg/agent/api/delegatedidentity/v1/service.go @@ -54,10 +54,11 @@ func New(config Config) *Service { } return &Service{ - manager: config.Manager, - attestor: endpoints.PeerTrackerAttestor{Attestor: config.Attestor}, - metrics: config.Metrics, - authorizedDelegates: AuthorizedDelegates, + manager: config.Manager, + peerAttestor: endpoints.PeerTrackerAttestor{Attestor: config.Attestor}, + delegateWorkloadAttestor: config.Attestor, + metrics: config.Metrics, + authorizedDelegates: AuthorizedDelegates, } } @@ -65,9 +66,10 @@ func 
New(config Config) *Service { type Service struct { delegatedidentityv1.UnsafeDelegatedIdentityServer - manager manager.Manager - attestor attestor - metrics telemetry.Metrics + manager manager.Manager + peerAttestor attestor + delegateWorkloadAttestor workloadattestor.Attestor + metrics telemetry.Metrics // SPIFFE IDs of delegates that are authorized to use this API authorizedDelegates map[string]bool @@ -79,7 +81,7 @@ func (s *Service) isCallerAuthorized(ctx context.Context, log logrus.FieldLogger callerSelectors := cachedSelectors if callerSelectors == nil { - callerSelectors, err = s.attestor.Attest(ctx) + callerSelectors, err = s.peerAttestor.Attest(ctx) if err != nil { log.WithError(err).Error("Workload attestation failed") return nil, status.Error(codes.Internal, "workload attestation failed") @@ -111,6 +113,53 @@ func (s *Service) isCallerAuthorized(ctx context.Context, log logrus.FieldLogger return nil, status.Error(codes.PermissionDenied, "caller not configured as an authorized delegate") } +func (s *Service) constructValidSelectorsFromReq(ctx context.Context, log logrus.FieldLogger, reqPid int32, reqSelectors []*types.Selector) ([]*common.Selector, error) { + // If you set + // - both pid and selector args + // - neither of them + // it's an error + // NOTE: the default value of int32 is naturally 0 in protobuf, which is also a valid PID. + // However, we will still treat that as an error, as we do not expect to ever be asked to attest + // pid 0. + + if (len(reqSelectors) != 0 && reqPid != 0) || (len(reqSelectors) == 0 && reqPid == 0) { + log.Error("Invalid argument; must provide either selectors or non-zero PID, but not both") + return nil, status.Error(codes.InvalidArgument, "must provide either selectors or non-zero PID, but not both") + } + + var selectors []*common.Selector + var err error + + if len(reqSelectors) != 0 { + // Delegate authorized, if the delegate gives us selectors, we treat them as attested. 
+ selectors, err = api.SelectorsFromProto(reqSelectors) + if err != nil { + log.WithError(err).Error("Invalid argument; could not parse provided selectors") + return nil, status.Error(codes.InvalidArgument, "could not parse provided selectors") + } + } else { + // Delegate authorized, use PID the delegate gave us to try and attest on-behalf-of + selectors, err = s.delegateWorkloadAttestor.Attest(ctx, int(reqPid)) + if err != nil { + return nil, err + } + } + + return selectors, nil +} + +// Attempt to attest and authorize the delegate, and then +// +// - Take a pre-atttested set of selectors from the delegate +// - the PID the delegate gave us and attempt to attest that into a set of selectors +// +// and provide a SVID subscription for those selectors. +// +// NOTE: +// - If supplying a PID, the trusted delegate is responsible for ensuring the PID is valid and not recycled, +// from initiation of this call until the termination of the response stream, and if it is, +// must discard any stream contents provided by this call as invalid. +// - If supplying selectors, the trusted delegate is responsible for ensuring they are correct. 
func (s *Service) SubscribeToX509SVIDs(req *delegatedidentityv1.SubscribeToX509SVIDsRequest, stream delegatedidentityv1.DelegatedIdentity_SubscribeToX509SVIDsServer) error { latency := adminapi.StartFirstX509SVIDUpdateLatency(s.metrics) ctx := stream.Context() @@ -122,10 +171,9 @@ func (s *Service) SubscribeToX509SVIDs(req *delegatedidentityv1.SubscribeToX509S return err } - selectors, err := api.SelectorsFromProto(req.Selectors) + selectors, err := s.constructValidSelectorsFromReq(ctx, log, req.Pid, req.Selectors) if err != nil { - log.WithError(err).Error("Invalid argument; could not parse provided selectors") - return status.Error(codes.InvalidArgument, "could not parse provided selectors") + return err } log.WithFields(logrus.Fields{ @@ -290,6 +338,18 @@ func (s *Service) SubscribeToX509Bundles(_ *delegatedidentityv1.SubscribeToX509B } } +// Attempt to attest and authorize the delegate, and then +// +// - Take a pre-atttested set of selectors from the delegate +// - the PID the delegate gave us and attempt to attest that into a set of selectors +// +// and provide a JWT SVID for those selectors. +// +// NOTE: +// - If supplying a PID, the trusted delegate is responsible for ensuring the PID is valid and not recycled, +// from initiation of this call until the response is returned, and if it is, +// must discard any response provided by this call as invalid. +// - If supplying selectors, the trusted delegate is responsible for ensuring they are correct. 
func (s *Service) FetchJWTSVIDs(ctx context.Context, req *delegatedidentityv1.FetchJWTSVIDsRequest) (resp *delegatedidentityv1.FetchJWTSVIDsResponse, err error) { log := rpccontext.Logger(ctx) if len(req.Audience) == 0 { @@ -301,10 +361,9 @@ func (s *Service) FetchJWTSVIDs(ctx context.Context, req *delegatedidentityv1.Fe return nil, err } - selectors, err := api.SelectorsFromProto(req.Selectors) + selectors, err := s.constructValidSelectorsFromReq(ctx, log, req.Pid, req.Selectors) if err != nil { - log.WithError(err).Error("Invalid argument; could not parse provided selectors") - return nil, status.Error(codes.InvalidArgument, "could not parse provided selectors") + return nil, err } resp = new(delegatedidentityv1.FetchJWTSVIDsResponse) diff --git a/pkg/agent/api/delegatedidentity/v1/service_test.go b/pkg/agent/api/delegatedidentity/v1/service_test.go index d1df00596f7..ea1d9f68a9f 100644 --- a/pkg/agent/api/delegatedidentity/v1/service_test.go +++ b/pkg/agent/api/delegatedidentity/v1/service_test.go @@ -79,6 +79,7 @@ func TestSubscribeToX509SVIDs(t *testing.T) { managerErr error expectMetrics []fakemetrics.MetricItem expectResp *delegatedidentityv1.SubscribeToX509SVIDsResponse + req *delegatedidentityv1.SubscribeToX509SVIDsRequest }{ { testName: "attest error", @@ -86,6 +87,32 @@ func TestSubscribeToX509SVIDs(t *testing.T) { expectCode: codes.Internal, expectMsg: "workload attestation failed", }, + { + testName: "incorrectly populate both pid and selectors", + authSpiffeID: []string{"spiffe://example.org/one"}, + identities: []cache.Identity{ + identities[0], + }, + expectCode: codes.InvalidArgument, + expectMsg: "must provide either selectors or non-zero PID, but not both", + req: &delegatedidentityv1.SubscribeToX509SVIDsRequest{ + Selectors: []*types.Selector{{Type: "sa", Value: "foo"}}, + Pid: 447, + }, + }, + { + testName: "incorrectly populate neither pid or selectors", + authSpiffeID: []string{"spiffe://example.org/one"}, + identities: []cache.Identity{ + 
identities[0], + }, + expectCode: codes.InvalidArgument, + expectMsg: "must provide either selectors or non-zero PID, but not both", + req: &delegatedidentityv1.SubscribeToX509SVIDsRequest{ + Selectors: []*types.Selector{}, + Pid: 0, + }, + }, { testName: "access to \"privileged\" admin API denied", authSpiffeID: []string{"spiffe://example.org/one/wrong"}, @@ -194,6 +221,40 @@ func TestSubscribeToX509SVIDs(t *testing.T) { expectMsg: "could not serialize response", expectMetrics: generateSubscribeToX509SVIDMetrics(), }, + { + testName: "workload update by PID with identity and federated bundles", + authSpiffeID: []string{"spiffe://example.org/one"}, + req: &delegatedidentityv1.SubscribeToX509SVIDsRequest{ + Pid: 447, + }, + identities: []cache.Identity{ + identities[0], + }, + updates: []*cache.WorkloadUpdate{ + { + Identities: []cache.Identity{ + identities[0], + }, + Bundle: bundle, + FederatedBundles: map[spiffeid.TrustDomain]*spiffebundle.Bundle{ + federatedBundle1.TrustDomain(): federatedBundle1}, + }, + }, + expectResp: &delegatedidentityv1.SubscribeToX509SVIDsResponse{ + X509Svids: []*delegatedidentityv1.X509SVIDWithKey{ + { + X509Svid: &types.X509SVID{ + Id: utilIDProtoFromString(t, x509SVID1.ID.String()), + CertChain: x509util.RawCertsFromCertificates(x509SVID1.Certificates), + ExpiresAt: x509SVID1.Certificates[0].NotAfter.Unix(), + }, + X509SvidKey: pkcs8FromSigner(t, x509SVID1.PrivateKey), + }, + }, + FederatesWith: []string{federatedBundle1.TrustDomain().IDString()}, + }, + expectMetrics: generateSubscribeToX509SVIDMetrics(), + }, { testName: "workload update with identity and federated bundles", authSpiffeID: []string{"spiffe://example.org/one"}, @@ -271,22 +332,24 @@ func TestSubscribeToX509SVIDs(t *testing.T) { ManagerErr: tt.managerErr, Metrics: metrics, } - runTest(t, params, - func(ctx context.Context, client delegatedidentityv1.DelegatedIdentityClient) { - selectors := []*types.Selector{{Type: "sa", Value: "foo"}} - req := 
&delegatedidentityv1.SubscribeToX509SVIDsRequest{ - Selectors: selectors, - } - - stream, err := client.SubscribeToX509SVIDs(ctx, req) - - require.NoError(t, err) - resp, err := stream.Recv() - - spiretest.RequireGRPCStatus(t, err, tt.expectCode, tt.expectMsg) - spiretest.RequireProtoEqual(t, tt.expectResp, resp) - require.Equal(t, tt.expectMetrics, metrics.AllMetrics()) - }) + runTest(t, params, func(ctx context.Context, client delegatedidentityv1.DelegatedIdentityClient) { + req := &delegatedidentityv1.SubscribeToX509SVIDsRequest{ + Selectors: []*types.Selector{{Type: "sa", Value: "foo"}}, + } + // if test params has a custom request, prefer that + if tt.req != nil { + req = tt.req + } + + stream, err := client.SubscribeToX509SVIDs(ctx, req) + + require.NoError(t, err) + resp, err := stream.Recv() + + spiretest.RequireGRPCStatus(t, err, tt.expectCode, tt.expectMsg) + spiretest.RequireProtoEqual(t, tt.expectResp, resp) + require.Equal(t, tt.expectMetrics, metrics.AllMetrics()) + }) }) } } @@ -409,6 +472,7 @@ func TestFetchJWTSVIDs(t *testing.T) { authSpiffeID []string audience []string selectors []*types.Selector + pid int32 expectCode codes.Code expectMsg string attestErr error @@ -427,6 +491,30 @@ func TestFetchJWTSVIDs(t *testing.T) { expectCode: codes.Internal, expectMsg: "workload attestation failed", }, + { + testName: "incorrectly populate both pid and selectors", + authSpiffeID: []string{"spiffe://example.org/one"}, + selectors: []*types.Selector{{Type: "sa", Value: "foo"}}, + pid: 447, + audience: []string{"AUDIENCE"}, + identities: []cache.Identity{ + identities[0], + }, + expectCode: codes.InvalidArgument, + expectMsg: "must provide either selectors or non-zero PID, but not both", + }, + { + testName: "incorrectly populate neither pid or selectors", + authSpiffeID: []string{"spiffe://example.org/one"}, + selectors: []*types.Selector{}, + pid: 0, + audience: []string{"AUDIENCE"}, + identities: []cache.Identity{ + identities[0], + }, + expectCode: 
codes.InvalidArgument, + expectMsg: "must provide either selectors or non-zero PID, but not both", + }, { testName: "Access to \"privileged\" admin API denied", authSpiffeID: []string{"spiffe://example.org/one/wrong"}, @@ -509,6 +597,33 @@ func TestFetchJWTSVIDs(t *testing.T) { }, }, }, + { + testName: "success with one identity by PID", + pid: 447, + authSpiffeID: []string{"spiffe://example.org/one"}, + audience: []string{"AUDIENCE"}, + identities: []cache.Identity{ + identities[0], + }, + jwtSVIDsResp: map[spiffeid.ID]*client.JWTSVID{ + id1: { + Token: jwtSVID1Token, + ExpiresAt: time.Unix(1680786600, 0), + IssuedAt: time.Unix(1680783000, 0), + }, + }, + expectResp: &delegatedidentityv1.FetchJWTSVIDsResponse{ + Svids: []*types.JWTSVID{ + { + Token: jwtSVID1Token, + Id: api.ProtoFromID(id1), + Hint: "internal", + ExpiresAt: 1680786600, + IssuedAt: 1680783000, + }, + }, + }, + }, { testName: "success with two identities", authSpiffeID: []string{"spiffe://example.org/one"}, @@ -562,6 +677,7 @@ func TestFetchJWTSVIDs(t *testing.T) { resp, err := client.FetchJWTSVIDs(ctx, &delegatedidentityv1.FetchJWTSVIDsRequest{ Audience: tt.audience, Selectors: tt.selectors, + Pid: tt.pid, }) spiretest.RequireGRPCStatus(t, err, tt.expectCode, tt.expectMsg) @@ -578,6 +694,7 @@ func TestFetchJWTSVIDs(t *testing.T) { }) } } + func TestSubscribeToJWTBundles(t *testing.T) { ca := testca.New(t, trustDomain1) @@ -707,7 +824,11 @@ func runTest(t *testing.T, params testParams, fn func(ctx context.Context, clien AuthorizedDelegates: params.AuthSpiffeID, }) - service.attestor = FakeAttestor{ + service.peerAttestor = FakeAttestor{ + err: params.AttestErr, + } + + service.delegateWorkloadAttestor = FakeWorkloadPIDAttestor{ err: params.AttestErr, } @@ -739,6 +860,15 @@ func (fa FakeAttestor) Attest(context.Context) ([]*common.Selector, error) { return fa.selectors, fa.err } +type FakeWorkloadPIDAttestor struct { + selectors []*common.Selector + err error +} + +func (fa FakeWorkloadPIDAttestor) 
Attest(_ context.Context, _ int) ([]*common.Selector, error) { + return fa.selectors, fa.err +} + type FakeManager struct { manager.Manager diff --git a/pkg/agent/catalog/nodeattestor.go b/pkg/agent/catalog/nodeattestor.go index eee9a0ddda6..5db0ca8d264 100644 --- a/pkg/agent/catalog/nodeattestor.go +++ b/pkg/agent/catalog/nodeattestor.go @@ -5,6 +5,7 @@ import ( "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/awsiid" "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/azuremsi" "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/gcpiit" + "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/httpchallenge" "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/jointoken" "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/k8spsat" "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/k8ssat" @@ -37,6 +38,7 @@ func (repo *nodeAttestorRepository) BuiltIns() []catalog.BuiltIn { awsiid.BuiltIn(), azuremsi.BuiltIn(), gcpiit.BuiltIn(), + httpchallenge.BuiltIn(), jointoken.BuiltIn(), k8spsat.BuiltIn(), k8ssat.BuiltIn(), diff --git a/pkg/agent/common/sigstore/config.go b/pkg/agent/common/sigstore/config.go new file mode 100644 index 00000000000..6a8852b2f67 --- /dev/null +++ b/pkg/agent/common/sigstore/config.go @@ -0,0 +1,98 @@ +package sigstore + +import "github.com/hashicorp/go-hclog" + +// Config holds configuration for the ImageVerifier. +type Config struct { + RekorURL string + RegistryCredentials map[string]*RegistryCredential + + AllowedIdentities map[string][]string + SkippedImages map[string]struct{} + IgnoreSCT bool + IgnoreTlog bool + IgnoreAttestations bool + + Logger hclog.Logger +} + +func NewConfig() *Config { + return &Config{ + AllowedIdentities: make(map[string][]string), + SkippedImages: make(map[string]struct{}), + } +} + +type HCLConfig struct { + // AllowedIdentities is a list of identities (issuer and subjects) that must match for the signature to be valid. 
+ AllowedIdentities map[string][]string `hcl:"allowed_identities" json:"allowed_identities"` + + // SkippedImages is a list of images that should skip sigstore verification + SkippedImages []string `hcl:"skipped_images" json:"skipped_images"` + + // RekorURL is the URL for the Rekor transparency log server to use for verifying entries. + RekorURL *string `hcl:"rekor_url,omitempty" json:"rekor_url,omitempty"` + + // IgnoreSCT specifies whether to bypass the requirement for a Signed Certificate Timestamp (SCT) during verification. + // An SCT is proof of inclusion in a Certificate Transparency log. + IgnoreSCT *bool `hcl:"ignore_sct,omitempty" json:"ignore_sct,omitempty"` + + // IgnoreTlog specifies whether to bypass the requirement for transparency log verification during signature validation. + IgnoreTlog *bool `hcl:"ignore_tlog,omitempty" json:"ignore_tlog,omitempty"` + + // IgnoreAttestations specifies whether to bypass the image attestations verification. + IgnoreAttestations *bool `hcl:"ignore_attestations,omitempty" json:"ignore_attestations,omitempty"` + + // RegistryCredentials is a map of credentials keyed by registry URL + RegistryCredentials map[string]*RegistryCredential `hcl:"registry_credentials,omitempty" json:"registry_credentials,omitempty"` +} + +type RegistryCredential struct { + Username string `hcl:"username,omitempty" json:"username,omitempty"` + Password string `hcl:"password,omitempty" json:"password,omitempty"` +} + +func NewConfigFromHCL(hclConfig *HCLConfig, log hclog.Logger) *Config { + config := NewConfig() + config.Logger = log + + if hclConfig.AllowedIdentities != nil { + config.AllowedIdentities = hclConfig.AllowedIdentities + } + + if hclConfig.SkippedImages != nil { + config.SkippedImages = make(map[string]struct{}) + for _, image := range hclConfig.SkippedImages { + config.SkippedImages[image] = struct{}{} + } + } + + if hclConfig.RekorURL != nil { + config.RekorURL = *hclConfig.RekorURL + } + + if hclConfig.IgnoreSCT != nil { + 
config.IgnoreSCT = *hclConfig.IgnoreSCT + } + + if hclConfig.IgnoreTlog != nil { + config.IgnoreTlog = *hclConfig.IgnoreTlog + } + + if hclConfig.IgnoreAttestations != nil { + config.IgnoreAttestations = *hclConfig.IgnoreAttestations + } + + if hclConfig.RegistryCredentials != nil { + m := make(map[string]*RegistryCredential) + for k, v := range hclConfig.RegistryCredentials { + m[k] = &RegistryCredential{ + Username: v.Username, + Password: v.Password, + } + } + config.RegistryCredentials = m + } + + return config +} diff --git a/pkg/agent/common/sigstore/config_test.go b/pkg/agent/common/sigstore/config_test.go new file mode 100644 index 00000000000..9188a89c42c --- /dev/null +++ b/pkg/agent/common/sigstore/config_test.go @@ -0,0 +1,85 @@ +package sigstore + +import ( + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/assert" +) + +func TestNewConfigFromHCL(t *testing.T) { + tests := []struct { + name string + hcl *HCLConfig + want *Config + }{ + { + name: "complete sigstore configuration", + hcl: &HCLConfig{ + AllowedIdentities: map[string][]string{ + "test-issuer-1": {"*@example.com", "subject@otherdomain.com"}, + "test-issuer-2": {"domain/ci.yaml@refs/tags/*"}, + }, + SkippedImages: []string{"registry/image@sha256:examplehash"}, + RekorURL: strPtr("https://test.dev"), + IgnoreSCT: boolPtr(true), + IgnoreTlog: boolPtr(true), + IgnoreAttestations: boolPtr(true), + RegistryCredentials: map[string]*RegistryCredential{ + "registry": { + Username: "user", + Password: "pass", + }, + }, + }, + want: &Config{ + AllowedIdentities: map[string][]string{ + "test-issuer-1": {"*@example.com", "subject@otherdomain.com"}, + "test-issuer-2": {"domain/ci.yaml@refs/tags/*"}, + }, + SkippedImages: map[string]struct{}{"registry/image@sha256:examplehash": {}}, + RekorURL: "https://test.dev", + IgnoreSCT: true, + IgnoreTlog: true, + IgnoreAttestations: true, + RegistryCredentials: map[string]*RegistryCredential{ + "registry": { + Username: "user", + 
Password: "pass", + }, + }, + Logger: hclog.NewNullLogger(), + }, + }, + { + name: "empty sigstore configuration", + hcl: &HCLConfig{}, + want: &Config{ + AllowedIdentities: map[string][]string{}, + SkippedImages: map[string]struct{}{}, + RekorURL: "", + IgnoreSCT: false, + IgnoreTlog: false, + IgnoreAttestations: false, + RegistryCredentials: nil, + Logger: hclog.NewNullLogger(), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + log := hclog.NewNullLogger() + got := NewConfigFromHCL(tt.hcl, log) + assert.Equal(t, tt.want, got) + }) + } +} + +func strPtr(s string) *string { + return &s +} + +func boolPtr(b bool) *bool { + return &b +} diff --git a/pkg/agent/common/sigstore/sigstore.go b/pkg/agent/common/sigstore/sigstore.go new file mode 100644 index 00000000000..3513b2fb0e8 --- /dev/null +++ b/pkg/agent/common/sigstore/sigstore.go @@ -0,0 +1,453 @@ +package sigstore + +import ( + "context" + "crypto/x509" + "encoding/asn1" + "encoding/base64" + "errors" + "fmt" + "strconv" + "strings" + "sync" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/hashicorp/go-hclog" + "github.com/sigstore/cosign/v2/pkg/cosign" + "github.com/sigstore/cosign/v2/pkg/oci" + cosignremote "github.com/sigstore/cosign/v2/pkg/oci/remote" + "github.com/sigstore/rekor/pkg/client" + rekorclient "github.com/sigstore/rekor/pkg/generated/client" + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/fulcioroots" + "github.com/spiffe/spire/pkg/common/telemetry" +) + +const ( + imageSignatureVerifiedSelector = "image-signature:verified" + imageAttestationsVerifiedSelector = "image-attestations:verified" + publicRekorURL = "https://rekor.sigstore.dev" +) + +var ( + oidcIssuerOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 1} +) + +type Verifier interface { + // Verify verifies an image and returns a list 
of selectors. + Verify(ctx context.Context, imageID string) ([]string, error) +} + +// ImageVerifier implements the Verifier interface. +type ImageVerifier struct { + config *Config + + verificationCache sync.Map + allowedIdentities []cosign.Identity + authOptions map[string]remote.Option + + rekorClient *rekorclient.Rekor + fulcioRoots *x509.CertPool + fulcioIntermediates *x509.CertPool + rekorPublicKeys *cosign.TrustedTransparencyLogPubKeys + ctLogPublicKeys *cosign.TrustedTransparencyLogPubKeys + + sigstoreFunctions sigstoreFunctions +} + +type sigstoreFunctions struct { + verifyImageSignatures cosignVerifyFn + verifyImageAttestations cosignVerifyFn + getRekorClient getRekorClientFn + getFulcioRoots getCertPoolFn + getFulcioIntermediates getCertPoolFn + getRekorPublicKeys getTLogPublicKeysFn + getCTLogPublicKeys getTLogPublicKeysFn +} + +type cosignVerifyFn func(context.Context, name.Reference, *cosign.CheckOpts) ([]oci.Signature, bool, error) +type getRekorClientFn func(string, ...client.Option) (*rekorclient.Rekor, error) +type getCertPoolFn func() (*x509.CertPool, error) +type getTLogPublicKeysFn func(context.Context) (*cosign.TrustedTransparencyLogPubKeys, error) + +func NewVerifier(config *Config) *ImageVerifier { + verifier := &ImageVerifier{ + config: config, + authOptions: processRegistryCredentials(config.RegistryCredentials, config.Logger), + sigstoreFunctions: sigstoreFunctions{ + verifyImageSignatures: cosign.VerifyImageSignatures, + verifyImageAttestations: cosign.VerifyImageAttestations, + getRekorClient: client.GetRekorClient, + getFulcioRoots: fulcioroots.Get, + getFulcioIntermediates: fulcioroots.GetIntermediates, + getRekorPublicKeys: cosign.GetRekorPubs, + getCTLogPublicKeys: cosign.GetCTLogPubs, + }, + } + + if verifier.config.Logger == nil { + verifier.config.Logger = hclog.Default() + } + + if verifier.config.RekorURL == "" { + verifier.config.RekorURL = publicRekorURL + } + + verifier.allowedIdentities = 
processAllowedIdentities(config.AllowedIdentities) + + return verifier +} + +// Init prepares the verifier by retrieving the Fulcio certificates and Rekor and CT public keys. +func (v *ImageVerifier) Init(ctx context.Context) error { + var err error + v.fulcioRoots, err = v.sigstoreFunctions.getFulcioRoots() + if err != nil { + return fmt.Errorf("failed to get fulcio root certificates: %w", err) + } + + v.fulcioIntermediates, err = v.sigstoreFunctions.getFulcioIntermediates() + if err != nil { + return fmt.Errorf("failed to get fulcio intermediate certificates: %w", err) + } + + if !v.config.IgnoreTlog { + v.rekorPublicKeys, err = v.sigstoreFunctions.getRekorPublicKeys(ctx) + if err != nil { + return fmt.Errorf("failed to get rekor public keys: %w", err) + } + v.rekorClient, err = v.sigstoreFunctions.getRekorClient(v.config.RekorURL, client.WithLogger(v.config.Logger)) + if err != nil { + return fmt.Errorf("failed to get rekor client: %w", err) + } + } + + if !v.config.IgnoreSCT { + v.ctLogPublicKeys, err = v.sigstoreFunctions.getCTLogPublicKeys(ctx) + if err != nil { + return fmt.Errorf("failed to get CT log public keys: %w", err) + } + } + + return nil +} + +// Verify validates image's signatures, attestations, and transparency logs using Cosign and Rekor. +// The imageID parameter is expected to be in the format "repository@sha256:digest". +// It returns selectors based on the image signature and rekor bundle details. +// Cosign ensures the image's signature issuer and subject match the configured allowed identities. +// If the image is in the skip list, it bypasses verification and returns an empty list of selectors. +// Uses a cache to avoid redundant verifications. +// An error is returned if the verification of the images signatures or attestations fails. 
+func (v *ImageVerifier) Verify(ctx context.Context, imageID string) ([]string, error) { + v.config.Logger.Debug("Verifying image with sigstore", telemetry.ImageID, imageID) + + // Check if the image is in the list of excluded images to determine if verification should be bypassed. + if _, ok := v.config.SkippedImages[imageID]; ok { + // Return an empty list, indicating no verification was performed. + return []string{}, nil + } + + // Check the cache for previously verified selectors. + if cachedSelectors, ok := v.verificationCache.Load(imageID); ok { + if cachedSelectors != nil { + v.config.Logger.Debug("Sigstore verifier cache hit", telemetry.ImageID, imageID) + return cachedSelectors.([]string), nil + } + } + + imageRef, err := name.ParseReference(imageID) + if err != nil { + return nil, fmt.Errorf("failed to parse image reference: %w", err) + } + + registryURL := imageRef.Context().RegistryStr() + authOption, exists := v.authOptions[registryURL] + if !exists { + authOption = remote.WithAuthFromKeychain(authn.DefaultKeychain) + } + + checkOptions := &cosign.CheckOpts{ + RekorClient: v.rekorClient, + RootCerts: v.fulcioRoots, + IntermediateCerts: v.fulcioIntermediates, + RekorPubKeys: v.rekorPublicKeys, + CTLogPubKeys: v.ctLogPublicKeys, + Identities: v.allowedIdentities, + IgnoreSCT: v.config.IgnoreSCT, + IgnoreTlog: v.config.IgnoreTlog, + RegistryClientOpts: []cosignremote.Option{cosignremote.WithRemoteOptions(authOption)}, + } + + signatures, err := v.verifySignatures(ctx, imageRef, checkOptions) + if err != nil { + return nil, err + } + + selectors := []string{imageSignatureVerifiedSelector} + + if !v.config.IgnoreAttestations { + attestations, err := v.verifyAttestations(ctx, imageRef, checkOptions) + if err != nil { + return nil, err + } + if len(attestations) > 0 { + selectors = append(selectors, imageAttestationsVerifiedSelector) + } + } + + detailsList, err := v.extractDetailsFromSignatures(signatures) + if err != nil { + return nil, fmt.Errorf("failed 
to extract details from signatures for image %q: %w", imageID, err) + } + + selectors = append(selectors, formatDetailsAsSelectors(detailsList)...) + + v.verificationCache.Store(imageID, selectors) + + return selectors, nil +} + +func (v *ImageVerifier) verifySignatures(ctx context.Context, imageRef name.Reference, checkOptions *cosign.CheckOpts) ([]oci.Signature, error) { + v.config.Logger.Debug("Verifying image signatures", telemetry.ImageID, imageRef.Name()) + + // Verify the image's signatures using cosign.VerifySignatures + signatures, bundleVerified, err := v.sigstoreFunctions.verifyImageSignatures(ctx, imageRef, checkOptions) + if err != nil { + return nil, fmt.Errorf("failed to verify signatures: %w", err) + } + if !bundleVerified && !v.config.IgnoreTlog { + return nil, fmt.Errorf("rekor bundle not verified for image: %s", imageRef.Name()) + } + if len(signatures) == 0 { + return nil, fmt.Errorf("no verified signature returned by cosign for image: %s", imageRef.Name()) + } + + return signatures, nil +} + +func (v *ImageVerifier) verifyAttestations(ctx context.Context, imageRef name.Reference, checkOptions *cosign.CheckOpts) ([]oci.Signature, error) { + v.config.Logger.Debug("Verifying image attestations", telemetry.ImageID, imageRef.Name()) + + // Verify the image's attestations using cosign.VerifyImageAttestations + attestations, bundleVerified, err := v.sigstoreFunctions.verifyImageAttestations(ctx, imageRef, checkOptions) + if err != nil { + return nil, fmt.Errorf("failed to verify image attestations: %w", err) + } + if len(attestations) > 0 && !bundleVerified && !v.config.IgnoreTlog { + return nil, fmt.Errorf("rekor bundle not verified for image: %s", imageRef.Name()) + } + + return attestations, nil +} + +func (v *ImageVerifier) extractDetailsFromSignatures(signatures []oci.Signature) ([]*signatureDetails, error) { + var detailsList []*signatureDetails + for _, signature := range signatures { + details, err := extractSignatureDetails(signature, 
v.config.IgnoreTlog) + if err != nil { + return nil, err + } + detailsList = append(detailsList, details) + } + return detailsList, nil +} + +func extractSignatureDetails(signature oci.Signature, ignoreTlog bool) (*signatureDetails, error) { + cert, err := getCertificate(signature) + if err != nil { + return nil, fmt.Errorf("failed to get certificate from signature: %w", err) + } + + subject, err := extractSubject(cert) + if err != nil { + return nil, fmt.Errorf("failed to extract subject from certificate: %w", err) + } + + issuer, err := extractIssuer(cert) + if err != nil { + return nil, fmt.Errorf("failed to extract issuer from certificate: %w", err) + } + + base64Signature, err := signature.Base64Signature() + if err != nil { + return nil, fmt.Errorf("failed to extract base64 signature from certificate: %w", err) + } + + var logIndex string + var logID string + var signedEntryTimestamp string + var integratedTime string + if !ignoreTlog { + rekorBundle, err := signature.Bundle() + if err != nil { + return nil, fmt.Errorf("failed to get signature rekor bundle: %w", err) + } + + logID = rekorBundle.Payload.LogID + logIndex = strconv.FormatInt(rekorBundle.Payload.LogIndex, 10) + integratedTime = strconv.FormatInt(rekorBundle.Payload.IntegratedTime, 10) + signedEntryTimestamp = base64.StdEncoding.EncodeToString(rekorBundle.SignedEntryTimestamp) + } + + return &signatureDetails{ + Subject: subject, + Issuer: issuer, + Signature: base64Signature, + LogID: logID, + LogIndex: logIndex, + IntegratedTime: integratedTime, + SignedEntryTimestamp: signedEntryTimestamp, + }, nil +} + +func getCertificate(signature oci.Signature) (*x509.Certificate, error) { + if signature == nil { + return nil, errors.New("signature is nil") + } + cert, err := signature.Cert() + if err != nil { + return nil, fmt.Errorf("failed to access signature certificate: %w", err) + } + if cert == nil { + return nil, errors.New("no certificate found in signature") + } + return cert, nil +} + +func 
extractSubject(cert *x509.Certificate) (string, error) { + if cert == nil { + return "", errors.New("certificate is nil") + } + + subjectAltNames := cryptoutils.GetSubjectAlternateNames(cert) + if len(subjectAltNames) == 0 { + return "", errors.New("no subject found in certificate") + } + + for _, san := range subjectAltNames { + if san != "" { + return san, nil + } + } + + return "", errors.New("subject alternative names are present but all are empty") +} + +func extractIssuer(cert *x509.Certificate) (string, error) { + if cert == nil { + return "", errors.New("certificate is nil") + } + + for _, ext := range cert.Extensions { + if ext.Id.Equal(oidcIssuerOID) { + issuer := string(ext.Value) + if issuer == "" { + return "", errors.New("OIDC issuer extension is present but empty") + } + return issuer, nil + } + } + + return "", errors.New("no OIDC issuer found in certificate extensions") +} + +type signatureDetails struct { + Subject string + Issuer string + Signature string + LogID string + LogIndex string + IntegratedTime string + SignedEntryTimestamp string +} + +func formatDetailsAsSelectors(detailsList []*signatureDetails) []string { + var selectors []string + for _, details := range detailsList { + selectors = append(selectors, detailsToSelectors(details)...) 
+ } + return selectors +} + +func detailsToSelectors(details *signatureDetails) []string { + var selectors []string + if details.Subject != "" { + selectors = append(selectors, fmt.Sprintf("image-signature-subject:%s", details.Subject)) + } + if details.Issuer != "" { + selectors = append(selectors, fmt.Sprintf("image-signature-issuer:%s", details.Issuer)) + } + if details.Signature != "" { + selectors = append(selectors, fmt.Sprintf("image-signature-value:%s", details.Signature)) + } + if details.LogID != "" { + selectors = append(selectors, fmt.Sprintf("image-signature-log-id:%s", details.LogID)) + } + if details.LogIndex != "" { + selectors = append(selectors, fmt.Sprintf("image-signature-log-index:%s", details.LogIndex)) + } + if details.IntegratedTime != "" { + selectors = append(selectors, fmt.Sprintf("image-signature-integrated-time:%s", details.IntegratedTime)) + } + if details.SignedEntryTimestamp != "" { + selectors = append(selectors, fmt.Sprintf("image-signature-signed-entry-timestamp:%s", details.SignedEntryTimestamp)) + } + return selectors +} + +func processRegistryCredentials(credentials map[string]*RegistryCredential, logger hclog.Logger) map[string]remote.Option { + authOptions := make(map[string]remote.Option) + + for registry, creds := range credentials { + if creds == nil { + continue + } + + usernameProvided := creds.Username != "" + passwordProvided := creds.Password != "" + + if usernameProvided && passwordProvided { + authOption := remote.WithAuth(&authn.Basic{ + Username: creds.Username, + Password: creds.Password, + }) + authOptions[registry] = authOption + } else if usernameProvided || passwordProvided { + logger.Warn("Incomplete credentials for registry %q. 
Both username and password must be provided.", registry) + } + } + + return authOptions +} + +func processAllowedIdentities(allowedIdentities map[string][]string) []cosign.Identity { + var identities []cosign.Identity + for issuer, subjects := range allowedIdentities { + for _, subject := range subjects { + identity := cosign.Identity{} + + if containsRegexChars(issuer) { + identity.IssuerRegExp = issuer + } else { + identity.Issuer = issuer + } + + if containsRegexChars(subject) { + identity.SubjectRegExp = subject + } else { + identity.Subject = subject + } + + identities = append(identities, identity) + } + } + return identities +} + +func containsRegexChars(s string) bool { + // check for characters commonly used in regex. + return strings.ContainsAny(s, "*+?^${}[]|()") +} diff --git a/pkg/agent/common/sigstore/sigstore_test.go b/pkg/agent/common/sigstore/sigstore_test.go new file mode 100644 index 00000000000..7d516bb93fd --- /dev/null +++ b/pkg/agent/common/sigstore/sigstore_test.go @@ -0,0 +1,747 @@ +package sigstore + +import ( + "context" + "crypto/sha256" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "testing" + "time" + + "github.com/google/go-containerregistry/pkg/name" + _ "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/types" + "github.com/hashicorp/go-hclog" + "github.com/sigstore/cosign/v2/pkg/cosign" + "github.com/sigstore/cosign/v2/pkg/cosign/bundle" + "github.com/sigstore/cosign/v2/pkg/oci" + "github.com/sigstore/rekor/pkg/client" + rekorclient "github.com/sigstore/rekor/pkg/generated/client" + "github.com/sigstore/sigstore/pkg/signature/payload" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewVerifier(t *testing.T) { + config := NewConfig() + config.Logger = hclog.NewNullLogger() + config.IgnoreSCT = true + config.IgnoreTlog 
= true + config.IgnoreAttestations = true + config.RekorURL = "https://rekor.test.com" + config.RegistryCredentials = map[string]*RegistryCredential{ + "docker.io": { + Username: "testuser", + Password: "testpassword", + }, + "other.io": { + Username: "testuser", + Password: "testpassword", + }, + } + + config.SkippedImages = map[string]struct{}{ + "test-image-1": {}, + "test-image-2": {}, + } + config.AllowedIdentities = map[string][]string{ + "test-issuer": {"test-subject"}, + "test-issuer-2*": {"test-subject-2*"}, + } + + verifier := NewVerifier(config) + require.NotNil(t, verifier) + + identityPlainValues := cosign.Identity{ + Issuer: "test-issuer", + Subject: "test-subject", + } + identityRegExp := cosign.Identity{ + IssuerRegExp: "test-issuer-2*", + SubjectRegExp: "test-subject-2*", + } + expectedIdentites := []cosign.Identity{identityPlainValues, identityRegExp} + + assert.Equal(t, config, verifier.config) + assert.Equal(t, len(config.RegistryCredentials), len(verifier.authOptions)) + assert.ElementsMatch(t, expectedIdentites, verifier.allowedIdentities) + assert.NotNil(t, verifier.sigstoreFunctions.verifyImageSignatures) + assert.NotNil(t, verifier.sigstoreFunctions.verifyImageAttestations) + assert.NotNil(t, verifier.sigstoreFunctions.getRekorClient) + assert.NotNil(t, verifier.sigstoreFunctions.getFulcioRoots) + assert.NotNil(t, verifier.sigstoreFunctions.getFulcioIntermediates) + assert.NotNil(t, verifier.sigstoreFunctions.getRekorPublicKeys) + assert.NotNil(t, verifier.sigstoreFunctions.getCTLogPublicKeys) +} + +func TestInitialize(t *testing.T) { + verifierSetup := setupVerifier() + + ctx := context.Background() + expectedRoots := x509.NewCertPool() + expectedIntermediates := x509.NewCertPool() + expectedRekorPubs := &cosign.TrustedTransparencyLogPubKeys{} + expectedCTLogPubs := &cosign.TrustedTransparencyLogPubKeys{} + expectedRekorClient := &rekorclient.Rekor{} + + verifierSetup.fakeGetFulcioRoots.Response.Roots = expectedRoots + 
verifierSetup.fakeGetFulcioRoots.Response.Err = nil + verifierSetup.fakeGetFulcioIntermediates.Response.Intermediates = expectedIntermediates + verifierSetup.fakeGetFulcioIntermediates.Response.Err = nil + verifierSetup.fakeGetRekorPubs.Response.PubKeys = expectedRekorPubs + verifierSetup.fakeGetRekorPubs.Response.Err = nil + verifierSetup.fakeGetCTLogPubs.Response.PubKeys = expectedCTLogPubs + verifierSetup.fakeGetCTLogPubs.Response.Err = nil + verifierSetup.fakeGetRekorClient.Response.Client = expectedRekorClient + verifierSetup.fakeGetRekorClient.Response.Err = nil + + // Act + err := verifierSetup.verifier.Init(ctx) + require.NoError(t, err) + + // Assert + assert.Equal(t, expectedRoots, verifierSetup.verifier.fulcioRoots) + assert.Equal(t, expectedIntermediates, verifierSetup.verifier.fulcioIntermediates) + assert.Equal(t, expectedRekorPubs, verifierSetup.verifier.rekorPublicKeys) + assert.Equal(t, expectedCTLogPubs, verifierSetup.verifier.ctLogPublicKeys) + assert.Equal(t, expectedRekorClient, verifierSetup.verifier.rekorClient) + + assert.Equal(t, 1, verifierSetup.fakeGetFulcioRoots.CallCount) + assert.Equal(t, 1, verifierSetup.fakeGetFulcioIntermediates.CallCount) + assert.Equal(t, 1, verifierSetup.fakeGetRekorPubs.CallCount) + assert.Equal(t, 1, verifierSetup.fakeGetCTLogPubs.CallCount) + assert.Equal(t, 1, verifierSetup.fakeGetRekorClient.CallCount) +} + +func TestVerify(t *testing.T) { + t.Parallel() + + manifest := []byte(`{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest", + }`) + hash := sha256.Sum256(manifest) + digest := "sha256:" + hex.EncodeToString(hash[:]) + imageID := fmt.Sprintf("test-id@%s", digest) + + tests := []struct { + name string + configureTest func(ctx context.Context, verifier *ImageVerifier, signatureVerifyFake *fakeCosignVerifySignatureFn, attestationsVerifyFake *fakeCosignVerifyAttestationsFn) + expectedSelectors []string + expectedError bool + expectedVerifyCallCount int + 
expectedAttestationsCallCount int + }{ + { + name: "generates selectors from verified signature, rekor bundle, and attestations", + configureTest: func(ctx context.Context, _ *ImageVerifier, signatureVerifyFake *fakeCosignVerifySignatureFn, attestationsVerifyFake *fakeCosignVerifyAttestationsFn) { + signature := &fakeSignature{ + payload: createFakePayload(), + base64Signature: "base64signature", + cert: createTestCert(), + bundle: createFakeBundle(), + } + + signatureVerifyFake.Responses = append(signatureVerifyFake.Responses, struct { + Signatures []oci.Signature + BundleVerified bool + Err error + }{ + Signatures: []oci.Signature{signature}, + BundleVerified: true, + Err: nil, + }) + attestationsVerifyFake.Responses = append(attestationsVerifyFake.Responses, struct { + Signatures []oci.Signature + BundleVerified bool + Err error + }{ + Signatures: []oci.Signature{signature}, + BundleVerified: true, + Err: nil, + }) + }, + expectedSelectors: []string{ + imageSignatureVerifiedSelector, + imageAttestationsVerifiedSelector, + "image-signature-subject:test-subject-san", + "image-signature-issuer:test-issuer", + "image-signature-value:base64signature", + "image-signature-log-id:test-log-id", + "image-signature-log-index:9876543210", + "image-signature-integrated-time:1234567890", + fmt.Sprintf("image-signature-signed-entry-timestamp:%s", base64.StdEncoding.EncodeToString([]byte("test-signed-timestamp"))), + }, + expectedError: false, + expectedVerifyCallCount: 1, + expectedAttestationsCallCount: 1, + }, + { + name: "generates selectors from verified signature and bundle, but ignore attestations", + configureTest: func(ctx context.Context, verifier *ImageVerifier, signatureVerifyFake *fakeCosignVerifySignatureFn, _ *fakeCosignVerifyAttestationsFn) { + verifier.config.IgnoreAttestations = true + + signature := &fakeSignature{ + payload: createFakePayload(), + base64Signature: "base64signature", + cert: createTestCert(), + bundle: createFakeBundle(), + } + + 
signatureVerifyFake.Responses = append(signatureVerifyFake.Responses, struct { + Signatures []oci.Signature + BundleVerified bool + Err error + }{ + Signatures: []oci.Signature{signature}, + BundleVerified: true, + Err: nil, + }) + }, + expectedSelectors: []string{ + imageSignatureVerifiedSelector, + "image-signature-subject:test-subject-san", + "image-signature-issuer:test-issuer", + "image-signature-value:base64signature", + "image-signature-log-id:test-log-id", + "image-signature-log-index:9876543210", + "image-signature-integrated-time:1234567890", + fmt.Sprintf("image-signature-signed-entry-timestamp:%s", base64.StdEncoding.EncodeToString([]byte("test-signed-timestamp"))), + }, + expectedError: false, + expectedVerifyCallCount: 1, + expectedAttestationsCallCount: 0, + }, + { + name: "tlog is set to ignore, not generate selectors from bundle", + configureTest: func(ctx context.Context, verifier *ImageVerifier, signatureVerifyFake *fakeCosignVerifySignatureFn, attestationsVerifyFake *fakeCosignVerifyAttestationsFn) { + verifier.config.IgnoreTlog = true + + signature := &fakeSignature{ + payload: createFakePayload(), + base64Signature: "base64signature", + cert: createTestCert(), + bundle: createFakeBundle(), + } + + signatureVerifyFake.Responses = append(signatureVerifyFake.Responses, struct { + Signatures []oci.Signature + BundleVerified bool + Err error + }{ + Signatures: []oci.Signature{signature}, + BundleVerified: false, + Err: nil, + }) + attestationsVerifyFake.Responses = append(attestationsVerifyFake.Responses, struct { + Signatures []oci.Signature + BundleVerified bool + Err error + }{ + Signatures: []oci.Signature{signature}, + BundleVerified: true, + Err: nil, + }) + }, + expectedSelectors: []string{ + imageSignatureVerifiedSelector, + imageAttestationsVerifiedSelector, + "image-signature-subject:test-subject-san", + "image-signature-issuer:test-issuer", + "image-signature-value:base64signature", + }, + expectedError: false, + expectedVerifyCallCount: 
1, + expectedAttestationsCallCount: 1, + }, + { + name: "tlog is not ignored, verification returns bundle not verified and causes error", + configureTest: func(ctx context.Context, verifier *ImageVerifier, signatureVerifyFake *fakeCosignVerifySignatureFn, _ *fakeCosignVerifyAttestationsFn) { + verifier.config.IgnoreTlog = false // make explicit that is not ignored + + signature := &fakeSignature{ + payload: createFakePayload(), + base64Signature: "base64signature", + cert: createTestCert(), + bundle: createFakeBundle(), + } + + signatureVerifyFake.Responses = append(signatureVerifyFake.Responses, struct { + Signatures []oci.Signature + BundleVerified bool + Err error + }{ + Signatures: []oci.Signature{signature}, + BundleVerified: false, + Err: nil, + }) + }, + expectedSelectors: nil, + expectedError: true, + expectedVerifyCallCount: 1, + expectedAttestationsCallCount: 0, + }, + { + name: "fails to verify signature", + configureTest: func(ctx context.Context, _ *ImageVerifier, signatureVerifyFake *fakeCosignVerifySignatureFn, _ *fakeCosignVerifyAttestationsFn) { + signatureVerifyFake.Responses = append(signatureVerifyFake.Responses, struct { + Signatures []oci.Signature + BundleVerified bool + Err error + }{ + Signatures: nil, + BundleVerified: false, + Err: errors.New("failed to verify signature"), + }) + }, + expectedSelectors: nil, + expectedError: true, + expectedVerifyCallCount: 1, + expectedAttestationsCallCount: 0, + }, + { + name: "fails to verify attestations", + configureTest: func(ctx context.Context, _ *ImageVerifier, signatureVerifyFake *fakeCosignVerifySignatureFn, attestationsVerifyFake *fakeCosignVerifyAttestationsFn) { + signature := &fakeSignature{ + payload: createFakePayload(), + base64Signature: "base64signature", + cert: createTestCert(), + bundle: createFakeBundle(), + } + + signatureVerifyFake.Responses = append(signatureVerifyFake.Responses, struct { + Signatures []oci.Signature + BundleVerified bool + Err error + }{ + Signatures: 
[]oci.Signature{signature}, + BundleVerified: true, + Err: nil, + }) + attestationsVerifyFake.Responses = append(attestationsVerifyFake.Responses, struct { + Signatures []oci.Signature + BundleVerified bool + Err error + }{ + Signatures: nil, + BundleVerified: false, + Err: errors.New("failed to verify attestations"), + }) + }, + expectedSelectors: nil, + expectedError: true, + expectedVerifyCallCount: 1, + expectedAttestationsCallCount: 1, + }, + { + name: "cache hit", + configureTest: func(ctx context.Context, verifier *ImageVerifier, _ *fakeCosignVerifySignatureFn, _ *fakeCosignVerifyAttestationsFn) { + verifier.verificationCache.Store(imageID, []string{ + imageSignatureVerifiedSelector, + imageAttestationsVerifiedSelector, + "image-signature-subject:some-test-subject", + "image-signature-issuer:some-test-issuer", + "image-signature-value:base64signature", + "image-signature-log-id:test-log-id", + "image-signature-integrated-time:1234567890", + fmt.Sprintf("image-signature-signed-entry-timestamp:%s", base64.StdEncoding.EncodeToString([]byte("test-signed-timestamp"))), + }) + }, + expectedSelectors: []string{ + imageSignatureVerifiedSelector, + imageAttestationsVerifiedSelector, + "image-signature-subject:some-test-subject", + "image-signature-issuer:some-test-issuer", + "image-signature-value:base64signature", + "image-signature-log-id:test-log-id", + "image-signature-integrated-time:1234567890", + fmt.Sprintf("image-signature-signed-entry-timestamp:%s", base64.StdEncoding.EncodeToString([]byte("test-signed-timestamp"))), + }, + expectedError: false, + expectedVerifyCallCount: 0, + expectedAttestationsCallCount: 0, + }, + { + name: "imageID is in the skipped images list", + configureTest: func(ctx context.Context, verifier *ImageVerifier, _ *fakeCosignVerifySignatureFn, _ *fakeCosignVerifyAttestationsFn) { + verifier.config.SkippedImages = map[string]struct{}{imageID: {}} + }, + expectedSelectors: []string{}, + expectedError: false, + expectedVerifyCallCount: 0, 
+ expectedAttestationsCallCount: 0, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + verifierSetup := setupVerifier() + tt.configureTest(ctx, verifierSetup.verifier, verifierSetup.fakeCosignVerifySignature, verifierSetup.fakeCosignVerifyAttestations) + + selectors, err := verifierSetup.verifier.Verify(ctx, imageID) + + assert.Equal(t, tt.expectedVerifyCallCount, verifierSetup.fakeCosignVerifySignature.CallCount) + assert.Equal(t, tt.expectedAttestationsCallCount, verifierSetup.fakeCosignVerifyAttestations.CallCount) + + if tt.expectedError { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + assert.ElementsMatch(t, tt.expectedSelectors, selectors) + }) + } +} + +func TestProcessAllowedIdentities(t *testing.T) { + tests := []struct { + name string + allowedIdentities map[string][]string + expected []cosign.Identity + }{ + { + name: "plain strings", + allowedIdentities: map[string][]string{ + "test-issuer": {"refs/tags/1.0.0"}, + }, + expected: []cosign.Identity{ + { + Issuer: "test-issuer", + Subject: "refs/tags/1.0.0", + }, + }, + }, + { + name: "issuer regex, subject plain", + allowedIdentities: map[string][]string{ + "test-issuer/*": {"refs/tags/1.0.0"}, + }, + expected: []cosign.Identity{ + { + IssuerRegExp: "test-issuer/*", + Subject: "refs/tags/1.0.0", + }, + }, + }, + { + name: "issuer plain, subject regex", + allowedIdentities: map[string][]string{ + "test-issuer": {"refs/tags/*"}, + }, + expected: []cosign.Identity{ + { + Issuer: "test-issuer", + SubjectRegExp: "refs/tags/*", + }, + }, + }, + { + name: "issuers and subjects mixed patterns", + allowedIdentities: map[string][]string{ + `test-issuer`: {`refs/(heads|tags)/release-.*`, `refs/heads/main`}, + `https://ci\.[a-zA-Z0-9-]+\.example\.com/workflows/[a-zA-Z0-9-]+`: {`refs/heads/main`, `refs/tags/v\d+\.\d+\.\d+`}, + }, + expected: 
[]cosign.Identity{ + { + Issuer: `test-issuer`, + SubjectRegExp: `refs/(heads|tags)/release-.*`, + }, + { + Issuer: `test-issuer`, + Subject: `refs/heads/main`, + }, + { + IssuerRegExp: `https://ci\.[a-zA-Z0-9-]+\.example\.com/workflows/[a-zA-Z0-9-]+`, + Subject: `refs/heads/main`, + }, + { + IssuerRegExp: `https://ci\.[a-zA-Z0-9-]+\.example\.com/workflows/[a-zA-Z0-9-]+`, + SubjectRegExp: `refs/tags/v\d+\.\d+\.\d+`, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := processAllowedIdentities(tt.allowedIdentities) + assert.ElementsMatch(t, tt.expected, actual) + }) + } +} + +type fakeCosignVerifySignatureFn struct { + Responses []fakeResponse + CallCount int +} + +type fakeResponse struct { + Signatures []oci.Signature + BundleVerified bool + Err error +} + +func (f *fakeCosignVerifySignatureFn) Verify(_ context.Context, _ name.Reference, _ *cosign.CheckOpts) ([]oci.Signature, bool, error) { + resp := f.Responses[f.CallCount] + f.CallCount++ + return resp.Signatures, resp.BundleVerified, resp.Err +} + +type fakeCosignVerifyAttestationsFn struct { + Responses []struct { + Signatures []oci.Signature + BundleVerified bool + Err error + } + CallCount int +} + +func (f *fakeCosignVerifyAttestationsFn) Verify(_ context.Context, _ name.Reference, _ *cosign.CheckOpts) ([]oci.Signature, bool, error) { + resp := f.Responses[f.CallCount] + f.CallCount++ + return resp.Signatures, resp.BundleVerified, resp.Err +} + +type fakeGetFulcioRootsFn struct { + Response struct { + Roots *x509.CertPool + Err error + } + CallCount int +} + +func (f *fakeGetFulcioRootsFn) Get() (*x509.CertPool, error) { + f.CallCount++ + return f.Response.Roots, f.Response.Err +} + +type fakeGetFulcioIntermediatesFn struct { + Response struct { + Intermediates *x509.CertPool + Err error + } + CallCount int +} + +func (f *fakeGetFulcioIntermediatesFn) Get() (*x509.CertPool, error) { + f.CallCount++ + return f.Response.Intermediates, f.Response.Err +} + +type 
fakeGetRekorPubsFn struct { + Response struct { + PubKeys *cosign.TrustedTransparencyLogPubKeys + Err error + } + CallCount int +} + +func (f *fakeGetRekorPubsFn) Get(_ context.Context) (*cosign.TrustedTransparencyLogPubKeys, error) { + f.CallCount++ + return f.Response.PubKeys, f.Response.Err +} + +type fakeGetCTLogPubsFn struct { + Response struct { + PubKeys *cosign.TrustedTransparencyLogPubKeys + Err error + } + CallCount int +} + +func (f *fakeGetCTLogPubsFn) Get(_ context.Context) (*cosign.TrustedTransparencyLogPubKeys, error) { + f.CallCount++ + return f.Response.PubKeys, f.Response.Err +} + +type fakeGetRekorClientFn struct { + Response struct { + Client *rekorclient.Rekor + Err error + } + CallCount int +} + +func (f *fakeGetRekorClientFn) Get(_ string, _ ...client.Option) (*rekorclient.Rekor, error) { + f.CallCount++ + return f.Response.Client, f.Response.Err +} + +type fakeSignature struct { + payload []byte + base64Signature string + cert *x509.Certificate + bundle *bundle.RekorBundle +} + +func (f *fakeSignature) Payload() ([]byte, error) { + return f.payload, nil +} + +func (f *fakeSignature) Base64Signature() (string, error) { + return f.base64Signature, nil +} + +func (f *fakeSignature) Cert() (*x509.Certificate, error) { + return f.cert, nil +} + +func (f *fakeSignature) Bundle() (*bundle.RekorBundle, error) { + return f.bundle, nil +} + +func (f *fakeSignature) Digest() (v1.Hash, error) { + return v1.Hash{}, nil +} + +func (f *fakeSignature) DiffID() (v1.Hash, error) { + return v1.Hash{}, nil +} + +func (f *fakeSignature) Compressed() (io.ReadCloser, error) { + return nil, nil +} + +func (f *fakeSignature) Uncompressed() (io.ReadCloser, error) { + return nil, nil +} + +func (f *fakeSignature) Size() (int64, error) { + return 0, nil +} + +func (f *fakeSignature) MediaType() (types.MediaType, error) { + return "", nil +} + +func (f *fakeSignature) Annotations() (map[string]string, error) { + return nil, nil +} + +func (f *fakeSignature) Signature() 
([]byte, error) { + return nil, nil +} + +func (f *fakeSignature) Chain() ([]*x509.Certificate, error) { + return nil, nil +} + +func (f *fakeSignature) RFC3161Timestamp() (*bundle.RFC3161Timestamp, error) { + return nil, nil +} + +type verifierSetup struct { + verifier *ImageVerifier + fakeCosignVerifySignature *fakeCosignVerifySignatureFn + fakeCosignVerifyAttestations *fakeCosignVerifyAttestationsFn + fakeGetFulcioRoots *fakeGetFulcioRootsFn + fakeGetFulcioIntermediates *fakeGetFulcioIntermediatesFn + fakeGetRekorPubs *fakeGetRekorPubsFn + fakeGetCTLogPubs *fakeGetCTLogPubsFn + fakeGetRekorClient *fakeGetRekorClientFn +} + +func setupVerifier() verifierSetup { + config := NewConfig() + logger := hclog.NewNullLogger() + config.Logger = logger + config.RekorURL = publicRekorURL + + fakeCosignVerifySignatureFn := &fakeCosignVerifySignatureFn{} + fakeCosignVerifyAttestationsFn := &fakeCosignVerifyAttestationsFn{} + fakeGetFulcioRootsFn := &fakeGetFulcioRootsFn{} + fakeGetFulcioIntermediatesFn := &fakeGetFulcioIntermediatesFn{} + fakeGetRekorPubsFn := &fakeGetRekorPubsFn{} + fakeGetCTLogPubsFn := &fakeGetCTLogPubsFn{} + fakeGetRekorClientFn := &fakeGetRekorClientFn{} + + verifier := &ImageVerifier{ + config: config, + sigstoreFunctions: sigstoreFunctions{ + verifyImageSignatures: fakeCosignVerifySignatureFn.Verify, + verifyImageAttestations: fakeCosignVerifyAttestationsFn.Verify, + getRekorClient: fakeGetRekorClientFn.Get, + getFulcioRoots: fakeGetFulcioRootsFn.Get, + getFulcioIntermediates: fakeGetFulcioIntermediatesFn.Get, + getRekorPublicKeys: fakeGetRekorPubsFn.Get, + getCTLogPublicKeys: fakeGetCTLogPubsFn.Get, + }, + } + + return verifierSetup{ + verifier: verifier, + fakeCosignVerifySignature: fakeCosignVerifySignatureFn, + fakeCosignVerifyAttestations: fakeCosignVerifyAttestationsFn, + fakeGetFulcioRoots: fakeGetFulcioRootsFn, + fakeGetFulcioIntermediates: fakeGetFulcioIntermediatesFn, + fakeGetRekorPubs: fakeGetRekorPubsFn, + fakeGetCTLogPubs: 
fakeGetCTLogPubsFn, + fakeGetRekorClient: fakeGetRekorClientFn, + } +} + +func createTestCert() *x509.Certificate { + return &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "test-common-name", + }, + DNSNames: []string{"test-subject-san", "another-san"}, + Extensions: []pkix.Extension{ + { + Id: []int{1, 3, 6, 1, 4, 1, 57264, 1, 1}, // OIDC issuer OID + Value: []byte("test-issuer"), + }, + }, + } +} + +func createFakePayload() []byte { + signaturePayload := payload.SimpleContainerImage{ + Optional: map[string]interface{}{ + "subject": "test-subject", + }, + } + payloadBytes, _ := json.Marshal(signaturePayload) + return payloadBytes +} + +func createFakeBundle() *bundle.RekorBundle { + signedTimestamp := "test-signed-timestamp" + return &bundle.RekorBundle{ + SignedEntryTimestamp: []byte(signedTimestamp), + Payload: bundle.RekorPayload{ + Body: base64.StdEncoding.EncodeToString([]byte(`{ + "apiVersion": "0.0.1", + "kind": "bundle", + "spec": { + "data": {}, + "signature": { + "content": "base64signature", + "format": "x509", + "publicKey": { + "format": "pem", + "content": "test-public-key" + } + } + } + }`)), + LogID: "test-log-id", + LogIndex: 9876543210, + IntegratedTime: 1234567890, + }, + } +} diff --git a/pkg/agent/endpoints/endpoints_test.go b/pkg/agent/endpoints/endpoints_test.go index 6fe115f3bd3..1b8e42d6db0 100644 --- a/pkg/agent/endpoints/endpoints_test.go +++ b/pkg/agent/endpoints/endpoints_test.go @@ -283,7 +283,7 @@ func (s FakeSDSv3Server) FetchSecrets(ctx context.Context, _ *discovery_v3.Disco } type FakeHealthServer struct { - *grpc_health_v1.UnimplementedHealthServer + grpc_health_v1.UnimplementedHealthServer } func attest(ctx context.Context, attestor PeerTrackerAttestor) error { diff --git a/pkg/agent/manager/cache/cache.go b/pkg/agent/manager/cache/cache.go index 8aad51f5f0b..7ad5293090d 100644 --- a/pkg/agent/manager/cache/cache.go +++ b/pkg/agent/manager/cache/cache.go @@ -166,7 +166,7 @@ func (c *Cache) Identities() []Identity { 
return out } -func (c *Cache) CountSVIDs() int { +func (c *Cache) CountX509SVIDs() int { c.mu.RLock() defer c.mu.RUnlock() @@ -183,6 +183,10 @@ func (c *Cache) CountSVIDs() int { return records } +func (c *Cache) CountJWTSVIDs() int { + return c.JWTSVIDCache.CountJWTSVIDs() +} + func (c *Cache) MatchingIdentities(selectors []*common.Selector) []Identity { set, setDone := allocSelectorSet(selectors...) defer setDone() diff --git a/pkg/agent/manager/cache/cache_test.go b/pkg/agent/manager/cache/cache_test.go index 151a301f587..9dd0d03e602 100644 --- a/pkg/agent/manager/cache/cache_test.go +++ b/pkg/agent/manager/cache/cache_test.go @@ -99,7 +99,7 @@ func TestCountSVIDs(t *testing.T) { cache.UpdateEntries(updateEntries, nil) // No SVIDs expected - require.Equal(t, 0, cache.CountSVIDs()) + require.Equal(t, 0, cache.CountX509SVIDs()) updateSVIDs := &UpdateSVIDs{ X509SVIDs: makeX509SVIDs(foo), @@ -107,7 +107,7 @@ func TestCountSVIDs(t *testing.T) { cache.UpdateSVIDs(updateSVIDs) // Only one SVID expected - require.Equal(t, 1, cache.CountSVIDs()) + require.Equal(t, 1, cache.CountX509SVIDs()) } func TestBundleChanges(t *testing.T) { diff --git a/pkg/agent/manager/cache/jwt_cache.go b/pkg/agent/manager/cache/jwt_cache.go index e0d13785724..7058fd443be 100644 --- a/pkg/agent/manager/cache/jwt_cache.go +++ b/pkg/agent/manager/cache/jwt_cache.go @@ -16,6 +16,10 @@ type JWTSVIDCache struct { svids map[string]*client.JWTSVID } +func (c *JWTSVIDCache) CountJWTSVIDs() int { + return len(c.svids) +} + func NewJWTSVIDCache() *JWTSVIDCache { return &JWTSVIDCache{ svids: make(map[string]*client.JWTSVID), diff --git a/pkg/agent/manager/cache/lru_cache.go b/pkg/agent/manager/cache/lru_cache.go index fedbb20ad51..eb5b4e5140b 100644 --- a/pkg/agent/manager/cache/lru_cache.go +++ b/pkg/agent/manager/cache/lru_cache.go @@ -11,7 +11,7 @@ import ( "github.com/sirupsen/logrus" "github.com/spiffe/go-spiffe/v2/bundle/spiffebundle" "github.com/spiffe/go-spiffe/v2/spiffeid" - 
"github.com/spiffe/spire/pkg/agent/common/backoff" + "github.com/spiffe/spire/pkg/common/backoff" "github.com/spiffe/spire/pkg/common/telemetry" agentmetrics "github.com/spiffe/spire/pkg/common/telemetry/agent" "github.com/spiffe/spire/proto/spire/common" @@ -171,13 +171,20 @@ func (c *LRUCache) Entries() []*common.RegistrationEntry { return out } -func (c *LRUCache) CountSVIDs() int { +func (c *LRUCache) CountX509SVIDs() int { c.mu.RLock() defer c.mu.RUnlock() return len(c.svids) } +func (c *LRUCache) CountJWTSVIDs() int { + c.mu.RLock() + defer c.mu.RUnlock() + + return len(c.JWTSVIDCache.svids) +} + func (c *LRUCache) CountRecords() int { c.mu.RLock() defer c.mu.RUnlock() @@ -446,7 +453,7 @@ func (c *LRUCache) UpdateEntries(update *UpdateEntries, checkSVID func(*common.R func (c *LRUCache) UpdateSVIDs(update *UpdateSVIDs) { c.mu.Lock() - defer func() { agentmetrics.SetSVIDMapSize(c.metrics, c.CountSVIDs()) }() + defer func() { agentmetrics.SetSVIDMapSize(c.metrics, c.CountX509SVIDs()) }() defer c.mu.Unlock() // Allocate a set of selectors that diff --git a/pkg/agent/manager/cache/lru_cache_test.go b/pkg/agent/manager/cache/lru_cache_test.go index 2abc00d3343..4691826842d 100644 --- a/pkg/agent/manager/cache/lru_cache_test.go +++ b/pkg/agent/manager/cache/lru_cache_test.go @@ -92,7 +92,7 @@ func TestLRUCacheCountSVIDs(t *testing.T) { cache.UpdateEntries(updateEntries, nil) // No SVIDs expected - require.Equal(t, 0, cache.CountSVIDs()) + require.Equal(t, 0, cache.CountX509SVIDs()) updateSVIDs := &UpdateSVIDs{ X509SVIDs: makeX509SVIDs(foo), @@ -100,7 +100,7 @@ func TestLRUCacheCountSVIDs(t *testing.T) { cache.UpdateSVIDs(updateSVIDs) // Only one SVID expected - require.Equal(t, 1, cache.CountSVIDs()) + require.Equal(t, 1, cache.CountX509SVIDs()) } func TestLRUCacheCountRecords(t *testing.T) { @@ -705,10 +705,10 @@ func TestLRUCacheSVIDCacheExpiry(t *testing.T) { sub.Finish() } } - assert.Equal(t, 12, cache.CountSVIDs()) + assert.Equal(t, 12, cache.CountX509SVIDs()) 
cache.UpdateEntries(updateEntries, nil) - assert.Equal(t, 10, cache.CountSVIDs()) + assert.Equal(t, 10, cache.CountX509SVIDs()) // foo SVID should be removed from cache as it does not have active subscriber assert.False(t, cache.notifySubscriberIfSVIDAvailable(makeSelectors("A"), subA.(*lruCacheSubscriber))) @@ -724,7 +724,7 @@ func TestLRUCacheSVIDCacheExpiry(t *testing.T) { require.Len(t, cache.GetStaleEntries(), 1) assert.Equal(t, foo, cache.GetStaleEntries()[0].Entry) - assert.Equal(t, 10, cache.CountSVIDs()) + assert.Equal(t, 10, cache.CountX509SVIDs()) } func TestLRUCacheMaxSVIDCacheSize(t *testing.T) { @@ -741,7 +741,7 @@ func TestLRUCacheMaxSVIDCacheSize(t *testing.T) { X509SVIDs: makeX509SVIDsFromStaleEntries(cache.GetStaleEntries()), }) require.Len(t, cache.GetStaleEntries(), 0) - assert.Equal(t, 10, cache.CountSVIDs()) + assert.Equal(t, 10, cache.CountX509SVIDs()) // Validate that active subscriber will still get SVID even if SVID count is at maxSvidCacheSize foo := makeRegistrationEntry("FOO", "A") @@ -752,12 +752,12 @@ func TestLRUCacheMaxSVIDCacheSize(t *testing.T) { cache.UpdateEntries(updateEntries, nil) require.Len(t, cache.GetStaleEntries(), 1) - assert.Equal(t, 10, cache.CountSVIDs()) + assert.Equal(t, 10, cache.CountX509SVIDs()) cache.UpdateSVIDs(&UpdateSVIDs{ X509SVIDs: makeX509SVIDs(foo), }) - assert.Equal(t, 11, cache.CountSVIDs()) + assert.Equal(t, 11, cache.CountX509SVIDs()) require.Len(t, cache.GetStaleEntries(), 0) } @@ -770,7 +770,7 @@ func TestSyncSVIDsWithSubscribers(t *testing.T) { cache.UpdateSVIDs(&UpdateSVIDs{ X509SVIDs: makeX509SVIDsFromStaleEntries(cache.GetStaleEntries()), }) - assert.Equal(t, 5, cache.CountSVIDs()) + assert.Equal(t, 5, cache.CountX509SVIDs()) // Update foo but its SVID is not yet cached foo := makeRegistrationEntry("FOO", "A") @@ -788,7 +788,7 @@ func TestSyncSVIDsWithSubscribers(t *testing.T) { require.Len(t, cache.GetStaleEntries(), 1) assert.Equal(t, []*StaleEntry{{Entry: cache.records[foo.EntryId].entry}}, 
cache.GetStaleEntries()) - assert.Equal(t, 5, cache.CountSVIDs()) + assert.Equal(t, 5, cache.CountX509SVIDs()) } func TestNotifySubscriberWhenSVIDIsAvailable(t *testing.T) { @@ -863,7 +863,7 @@ func TestSubscribeToWorkloadUpdatesLRUNoSelectors(t *testing.T) { cache.UpdateSVIDs(&UpdateSVIDs{ X509SVIDs: makeX509SVIDs(foo, bar), }) - assert.Equal(t, 2, cache.CountSVIDs()) + assert.Equal(t, 2, cache.CountX509SVIDs()) select { case err := <-subErrCh: @@ -932,7 +932,7 @@ func TestSubscribeToLRUCacheChanges(t *testing.T) { cache.UpdateSVIDs(&UpdateSVIDs{ X509SVIDs: makeX509SVIDs(foo, bar), }) - assert.Equal(t, 2, cache.CountSVIDs()) + assert.Equal(t, 2, cache.CountX509SVIDs()) clk.WaitForAfter(time.Second, "waiting for after to get called") clk.Add(SVIDSyncInterval * 4) diff --git a/pkg/agent/manager/manager.go b/pkg/agent/manager/manager.go index 40ce50f6b62..3294b1c2fc7 100644 --- a/pkg/agent/manager/manager.go +++ b/pkg/agent/manager/manager.go @@ -13,11 +13,11 @@ import ( "github.com/spiffe/go-spiffe/v2/bundle/spiffebundle" "github.com/spiffe/go-spiffe/v2/spiffeid" "github.com/spiffe/spire/pkg/agent/client" - "github.com/spiffe/spire/pkg/agent/common/backoff" "github.com/spiffe/spire/pkg/agent/manager/cache" "github.com/spiffe/spire/pkg/agent/manager/storecache" "github.com/spiffe/spire/pkg/agent/storage" "github.com/spiffe/spire/pkg/agent/svid" + "github.com/spiffe/spire/pkg/common/backoff" "github.com/spiffe/spire/pkg/common/nodeutil" "github.com/spiffe/spire/pkg/common/rotationutil" "github.com/spiffe/spire/pkg/common/telemetry" @@ -77,8 +77,14 @@ type Manager interface { // is no JWT cached, the manager will get one signed upstream. 
FetchJWTSVID(ctx context.Context, entry *common.RegistrationEntry, audience []string) (*client.JWTSVID, error) - // CountSVIDs returns the amount of X509 SVIDs on memory - CountSVIDs() int + // CountX509SVIDs returns the amount of X509 SVIDs on memory + CountX509SVIDs() int + + // CountJWTSVIDs returns the amount of JWT SVIDs on memory + CountJWTSVIDs() int + + // CountSVIDStoreX509SVIDs returns the amount of x509 SVIDs on SVIDStore in-memory cache + CountSVIDStoreX509SVIDs() int // GetLastSync returns the last successful rotation timestamp GetLastSync() time.Time @@ -107,8 +113,11 @@ type Cache interface { // MatchingRegistrationEntries with given selectors MatchingRegistrationEntries(selectors []*common.Selector) []*common.RegistrationEntry - // CountSVIDs in cache stored - CountSVIDs() int + // CountX509SVIDs in cache stored + CountX509SVIDs() int + + // CountJWTSVIDs in cache stored + CountJWTSVIDs() int // FetchWorkloadUpdate for given selectors FetchWorkloadUpdate(selectors []*common.Selector) *cache.WorkloadUpdate @@ -251,8 +260,16 @@ func (m *manager) MatchingRegistrationEntries(selectors []*common.Selector) []*c return m.cache.MatchingRegistrationEntries(selectors) } -func (m *manager) CountSVIDs() int { - return m.cache.CountSVIDs() +func (m *manager) CountX509SVIDs() int { + return m.cache.CountX509SVIDs() +} + +func (m *manager) CountJWTSVIDs() int { + return m.cache.CountJWTSVIDs() +} + +func (m *manager) CountSVIDStoreX509SVIDs() int { + return m.svidStoreCache.CountX509SVIDs() } // FetchWorkloadUpdates gets the latest workload update for the selectors @@ -317,7 +334,7 @@ func (m *manager) runSynchronizer(ctx context.Context) error { // Clamp the sync interval to the default value when the agent doesn't have any SVIDs cached // AND the previous sync request succeeded - if m.cache.CountSVIDs() == 0 { + if m.cache.CountX509SVIDs() == 0 { syncInterval = min(syncInterval, defaultSyncInterval) } } diff --git a/pkg/agent/manager/manager_test.go 
b/pkg/agent/manager/manager_test.go index 6521f6f9b30..6fd93bf2660 100644 --- a/pkg/agent/manager/manager_test.go +++ b/pkg/agent/manager/manager_test.go @@ -243,7 +243,7 @@ func TestHappyPathWithoutSyncNorRotation(t *testing.T) { require.Equal(t, api.bundle, m.GetBundle()) // Expect three SVIDs on cache - require.Equal(t, 3, m.CountSVIDs()) + require.Equal(t, 3, m.CountX509SVIDs()) // Expect last sync require.Equal(t, clk.Now(), m.GetLastSync()) @@ -335,7 +335,7 @@ func TestRotationWithRSAKey(t *testing.T) { require.Equal(t, api.bundle, m.GetBundle()) // Expect three SVIDs on cache - require.Equal(t, 3, m.CountSVIDs()) + require.Equal(t, 3, m.CountX509SVIDs()) // Expect last sync require.Equal(t, clk.Now(), m.GetLastSync()) @@ -1229,7 +1229,7 @@ func TestSyncSVIDsWithLRUCache(t *testing.T) { assert.NoError(t, subErr, "subscriber error") // ensure 2 SVIDs corresponding to selectors are cached. - assert.Equal(t, 2, m.cache.CountSVIDs()) + assert.Equal(t, 2, m.cache.CountX509SVIDs()) // cancel the ctx to stop go routines cancel() @@ -1688,7 +1688,7 @@ func (h *mockAPI) BatchNewX509SVID(_ context.Context, req *svidv1.BatchNewX509SV entry, ok := entries[param.EntryId] if !ok { resp.Results = append(resp.Results, &svidv1.BatchNewX509SVIDResponse_Result{ - Status: api.CreateStatus(codes.NotFound, "entry %q not found", param.EntryId), + Status: api.CreateStatusf(codes.NotFound, "entry %q not found", param.EntryId), }) continue } diff --git a/pkg/agent/manager/storecache/cache.go b/pkg/agent/manager/storecache/cache.go index a9380512509..f12225bd00a 100644 --- a/pkg/agent/manager/storecache/cache.go +++ b/pkg/agent/manager/storecache/cache.go @@ -250,6 +250,12 @@ func (c *Cache) GetStaleEntries() []*cache.StaleEntry { return staleEntries } +func (c *Cache) CountX509SVIDs() int { + c.mtx.RLock() + defer c.mtx.RUnlock() + return len(c.records) +} + // ReadyToStore returns all records that are ready to be stored func (c *Cache) ReadyToStore() []*Record { c.mtx.Lock() diff 
--git a/pkg/agent/plugin/nodeattestor/httpchallenge/httpchallenge.go b/pkg/agent/plugin/nodeattestor/httpchallenge/httpchallenge.go new file mode 100644 index 00000000000..ea6eb21aafc --- /dev/null +++ b/pkg/agent/plugin/nodeattestor/httpchallenge/httpchallenge.go @@ -0,0 +1,231 @@ +package httpchallenge + +import ( + "context" + "encoding/json" + "fmt" + "net" + "net/http" + "os" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/hcl" + nodeattestorv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/agent/nodeattestor/v1" + configv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/service/common/config/v1" + "github.com/spiffe/spire/pkg/common/catalog" + "github.com/spiffe/spire/pkg/common/plugin/httpchallenge" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + pluginName = "http_challenge" +) + +func BuiltIn() catalog.BuiltIn { + return builtin(New()) +} + +func builtin(p *Plugin) catalog.BuiltIn { + return catalog.MakeBuiltIn(pluginName, + nodeattestorv1.NodeAttestorPluginServer(p), + configv1.ConfigServiceServer(p)) +} + +type configData struct { + port int + advertisedPort int + hostName string + agentName string +} + +type Config struct { + HostName string `hcl:"hostname"` + AgentName string `hcl:"agentname"` + Port int `hcl:"port"` + AdvertisedPort int `hcl:"advertised_port"` +} + +type Plugin struct { + nodeattestorv1.UnsafeNodeAttestorServer + configv1.UnsafeConfigServer + + m sync.RWMutex + c *configData + + log hclog.Logger + + hooks struct { + // Controls which interface to bind to ("" in production, "localhost" + // in tests) and acts as the default HostName value when not provided + // via configuration. 
+ bindHost string + } +} + +func New() *Plugin { + return &Plugin{} +} + +func (p *Plugin) AidAttestation(stream nodeattestorv1.NodeAttestor_AidAttestationServer) (err error) { + data, err := p.getConfig() + if err != nil { + return err + } + + ctx := stream.Context() + + port := data.port + + l, err := net.Listen("tcp", fmt.Sprintf("%s:%d", p.hooks.bindHost, port)) + if err != nil { + return status.Errorf(codes.Internal, "could not listen on port %d: %v", port, err) + } + defer l.Close() + + advertisedPort := data.advertisedPort + if advertisedPort == 0 { + advertisedPort = l.Addr().(*net.TCPAddr).Port + } + + attestationPayload, err := json.Marshal(httpchallenge.AttestationData{ + HostName: data.hostName, + AgentName: data.agentName, + Port: advertisedPort, + }) + if err != nil { + return status.Errorf(codes.Internal, "unable to marshal attestation data: %v", err) + } + + // send the attestation data back to the agent + if err := stream.Send(&nodeattestorv1.PayloadOrChallengeResponse{ + Data: &nodeattestorv1.PayloadOrChallengeResponse_Payload{ + Payload: attestationPayload, + }, + }); err != nil { + return err + } + + // receive challenge + resp, err := stream.Recv() + if err != nil { + return err + } + + challenge := new(httpchallenge.Challenge) + if err := json.Unmarshal(resp.Challenge, challenge); err != nil { + return status.Errorf(codes.Internal, "unable to unmarshal challenge: %v", err) + } + + // due to https://github.com/spiffe/spire/blob/8f9fa036e182a2fab968e03cd25a7fdb2d8c88bb/pkg/agent/plugin/nodeattestor/v1.go#L63, we must respond with a non blank challenge response + responseBytes := []byte{'\n'} + if err := stream.Send(&nodeattestorv1.PayloadOrChallengeResponse{ + Data: &nodeattestorv1.PayloadOrChallengeResponse_ChallengeResponse{ + ChallengeResponse: responseBytes, + }, + }); err != nil { + return err + } + + err = p.serveNonce(ctx, l, data.agentName, challenge.Nonce) + if err != nil { + return status.Errorf(codes.Internal, "failed to start 
webserver: %v", err) + } + return nil +} + +func (p *Plugin) Configure(_ context.Context, req *configv1.ConfigureRequest) (*configv1.ConfigureResponse, error) { + // Parse HCL config payload into config struct + config := new(Config) + if err := hcl.Decode(config, req.HclConfiguration); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "unable to decode configuration: %v", err) + } + + // Make sure the configuration produces valid data + configData, err := p.loadConfigData(config) + if err != nil { + return nil, err + } + + p.setConfig(configData) + + return &configv1.ConfigureResponse{}, nil +} + +func (p *Plugin) serveNonce(ctx context.Context, l net.Listener, agentName string, nonce string) (err error) { + h := http.NewServeMux() + s := &http.Server{ + Handler: h, + ReadTimeout: 10 * time.Second, + WriteTimeout: 10 * time.Second, + } + path := fmt.Sprintf("/.well-known/spiffe/nodeattestor/http_challenge/%s/challenge", agentName) + p.log.Debug("Setting up nonce handler", "path", path) + h.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, nonce) + }) + + go func() { + <-ctx.Done() + _ = s.Shutdown(context.Background()) + }() + + err = s.Serve(l) + if err == http.ErrServerClosed { + return nil + } + return err +} + +// SetLogger sets this plugin's logger +func (p *Plugin) SetLogger(log hclog.Logger) { + p.log = log +} + +func (p *Plugin) getConfig() (*configData, error) { + p.m.RLock() + defer p.m.RUnlock() + if p.c == nil { + return nil, status.Error(codes.FailedPrecondition, "not configured") + } + return p.c, nil +} + +func (p *Plugin) setConfig(c *configData) { + p.m.Lock() + defer p.m.Unlock() + p.c = c +} + +func (p *Plugin) loadConfigData(config *Config) (*configData, error) { + // Determine the host name to pass to the server. Values are preferred in + // this order: + // 1. HCL HostName configuration value + // 2. 
OS hostname value + hostName := config.HostName + if hostName == "" { + var err error + hostName, err = os.Hostname() + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "unable to fetch hostname: %v", err) + } + } + + var agentName = "default" + if config.AgentName != "" { + agentName = config.AgentName + } + + if config.AdvertisedPort == 0 { + config.AdvertisedPort = config.Port + } + + return &configData{ + port: config.Port, + advertisedPort: config.AdvertisedPort, + hostName: hostName, + agentName: agentName, + }, nil +} diff --git a/pkg/agent/plugin/nodeattestor/httpchallenge/httpchallenge_test.go b/pkg/agent/plugin/nodeattestor/httpchallenge/httpchallenge_test.go new file mode 100644 index 00000000000..245309c4a4d --- /dev/null +++ b/pkg/agent/plugin/nodeattestor/httpchallenge/httpchallenge_test.go @@ -0,0 +1,214 @@ +package httpchallenge + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "net/http" + "testing" + + configv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/service/common/config/v1" + "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor" + nodeattestortest "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/test" + common_httpchallenge "github.com/spiffe/spire/pkg/common/plugin/httpchallenge" + "github.com/spiffe/spire/test/plugintest" + "github.com/stretchr/testify/require" +) + +var ( + streamBuilder = nodeattestortest.ServerStream("http_challenge") +) + +func TestConfigureCommon(t *testing.T) { + tests := []struct { + name string + hclConf string + expErr string + }{ + { + name: "Configure fails if receives wrong HCL configuration", + hclConf: "not HCL conf", + expErr: "rpc error: code = InvalidArgument desc = unable to decode configuration", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + plugin := newPlugin() + + resp, err := plugin.Configure(context.Background(), &configv1.ConfigureRequest{HclConfiguration: tt.hclConf}) + if tt.expErr != "" { + 
require.Contains(t, err.Error(), tt.expErr) + require.Nil(t, resp) + return + } + + require.NoError(t, err) + require.NotNil(t, resp) + }) + } +} + +func TestAidAttestationFailures(t *testing.T) { + tests := []struct { + name string + config string + expErr string + serverStream nodeattestor.ServerStream + }{ + { + name: "AidAttestation fails if server does not sends a challenge", + config: "", + expErr: "the error", + serverStream: streamBuilder.FailAndBuild(errors.New("the error")), + }, + { + name: "AidAttestation fails if agent cannot unmarshal server challenge", + config: "", + expErr: "rpc error: code = Internal desc = nodeattestor(http_challenge): unable to unmarshal challenge: invalid character 'o' in literal null (expecting 'u')", + serverStream: streamBuilder.IgnoreThenChallenge([]byte("not-a-challenge")).Build(), + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + var err error + p := loadAndConfigurePlugin(t, tt.config) + + err = p.Attest(context.Background(), tt.serverStream) + if tt.expErr != "" { + require.Contains(t, err.Error(), tt.expErr) + return + } + require.NoError(t, err) + }) + } +} + +func TestAidAttestationSucceeds(t *testing.T) { + l, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + port := l.Addr().(*net.TCPAddr).Port + defer l.Close() + + tests := []struct { + name string + config string + attestationData common_httpchallenge.AttestationData + serverStream func(attestationData *common_httpchallenge.AttestationData, challenge []byte, expectPayload []byte, challengeobj *common_httpchallenge.Challenge, port int) nodeattestor.ServerStream + }{ + { + name: "Check for random port", + config: "", + attestationData: common_httpchallenge.AttestationData{ + HostName: "spire-dev", + AgentName: "default", + }, + serverStream: func(attestationData *common_httpchallenge.AttestationData, challenge []byte, expectPayload []byte, challengeobj *common_httpchallenge.Challenge, port int) 
nodeattestor.ServerStream { + return streamBuilder. + Handle(func(challenge []byte) ([]byte, error) { + attestationData := new(common_httpchallenge.AttestationData) + if err := json.Unmarshal(challenge, attestationData); err != nil { + return nil, err + } + if attestationData.Port == port { + return nil, errors.New("random port failed") + } + return nil, nil + }).Build() + }, + }, + { + name: "Check for advertised port", + config: fmt.Sprintf("advertised_port = %d", port), + attestationData: common_httpchallenge.AttestationData{ + HostName: "spire-dev", + AgentName: "default", + }, + serverStream: func(attestationData *common_httpchallenge.AttestationData, challenge []byte, expectPayload []byte, challengeobj *common_httpchallenge.Challenge, port int) nodeattestor.ServerStream { + return streamBuilder. + Handle(func(challenge []byte) ([]byte, error) { + attestationData := new(common_httpchallenge.AttestationData) + if err := json.Unmarshal(challenge, attestationData); err != nil { + return nil, err + } + if attestationData.Port != port { + return nil, errors.New("advertised port failed") + } + return nil, nil + }).Build() + }, + }, + { + name: "Test with defaults except port", + config: "port=9999", + attestationData: common_httpchallenge.AttestationData{ + HostName: "localhost", + AgentName: "default", + Port: 9999, + }, + serverStream: func(attestationData *common_httpchallenge.AttestationData, challenge []byte, expectPayload []byte, challengeobj *common_httpchallenge.Challenge, port int) nodeattestor.ServerStream { + return streamBuilder.IgnoreThenChallenge(challenge). 
+ Handle(func(challengeResponse []byte) ([]byte, error) { + err := common_httpchallenge.VerifyChallenge(context.Background(), http.DefaultClient, attestationData, challengeobj) + return nil, err + }).Build() + }, + }, + { + name: "Full test with all the settings", + config: "hostname=\"localhost\"\nagentname=\"test\"\nport=9999\nadvertised_port=9999", + attestationData: common_httpchallenge.AttestationData{ + HostName: "localhost", + AgentName: "test", + Port: 9999, + }, + serverStream: func(attestationData *common_httpchallenge.AttestationData, challenge []byte, expectPayload []byte, challengeobj *common_httpchallenge.Challenge, port int) nodeattestor.ServerStream { + return streamBuilder.ExpectThenChallenge(expectPayload, challenge). + Handle(func(challengeResponse []byte) ([]byte, error) { + err := common_httpchallenge.VerifyChallenge(context.Background(), http.DefaultClient, attestationData, challengeobj) + return nil, err + }).Build() + }, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + var err error + expectPayload, err := json.Marshal(&tt.attestationData) + require.NoError(t, err) + + challengeobj, err := common_httpchallenge.GenerateChallenge("") + require.NoError(t, err) + + challenge, err := json.Marshal(challengeobj) + require.NoError(t, err) + + p := loadAndConfigurePlugin(t, tt.config) + + err = p.Attest(context.Background(), tt.serverStream(&tt.attestationData, challenge, expectPayload, challengeobj, port)) + require.NoError(t, err) + }) + } +} + +func loadAndConfigurePlugin(t *testing.T, config string) nodeattestor.NodeAttestor { + return loadPlugin(t, plugintest.Configure(config)) +} + +func loadPlugin(t *testing.T, options ...plugintest.Option) nodeattestor.NodeAttestor { + na := new(nodeattestor.V1) + plugintest.Load(t, builtin(newPlugin()), na, options...) 
+ return na +} + +func newPlugin() *Plugin { + p := New() + p.hooks.bindHost = "localhost" + return p +} diff --git a/pkg/agent/plugin/workloadattestor/docker/docker.go b/pkg/agent/plugin/workloadattestor/docker/docker.go index 3af7c24996d..36ec0b3081f 100644 --- a/pkg/agent/plugin/workloadattestor/docker/docker.go +++ b/pkg/agent/plugin/workloadattestor/docker/docker.go @@ -10,12 +10,14 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" dockerclient "github.com/docker/docker/client" - hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/token" workloadattestorv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/agent/workloadattestor/v1" configv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/service/common/config/v1" + "github.com/spiffe/spire/pkg/agent/common/sigstore" "github.com/spiffe/spire/pkg/common/catalog" + "github.com/spiffe/spire/pkg/common/telemetry" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -41,6 +43,7 @@ func builtin(p *Plugin) catalog.BuiltIn { // Docker is a subset of the docker client functionality, useful for mocking. 
type Docker interface { ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) + ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) } type Plugin struct { @@ -50,9 +53,10 @@ type Plugin struct { log hclog.Logger retryer *retryer - mtx sync.RWMutex - docker Docker - c *containerHelper + mtx sync.RWMutex + docker Docker + c *containerHelper + sigstoreVerifier sigstore.Verifier } func New() *Plugin { @@ -68,6 +72,13 @@ type dockerPluginConfig struct { DockerVersion string `hcl:"docker_version" json:"docker_version"` UnusedKeyPositions map[string][]token.Pos `hcl:",unusedKeyPositions"` + + Experimental experimentalConfig `hcl:"experimental,omitempty" json:"experimental,omitempty"` +} + +type experimentalConfig struct { + // Sigstore contains sigstore specific configs. + Sigstore *sigstore.HCLConfig `hcl:"sigstore,omitempty"` } func (p *Plugin) SetLogger(log hclog.Logger) { @@ -99,8 +110,43 @@ func (p *Plugin) Attest(ctx context.Context, req *workloadattestorv1.AttestReque return nil, err } + selectors := getSelectorValuesFromConfig(container.Config) + + if p.sigstoreVerifier != nil { + imageName := container.Config.Image + imageJSON, _, err := p.docker.ImageInspectWithRaw(ctx, imageName) + if err != nil { + return nil, fmt.Errorf("failed to inspect image %q: %w", imageName, err) + } + + if len(imageJSON.RepoDigests) == 0 { + return nil, fmt.Errorf("sigstore signature verification failed: no repo digest found for image %s", imageName) + } + + var verified bool + // RepoDigests is a list of content-addressable digests of locally available + // image manifests that the image is referenced from. Multiple manifests can + // refer to the same image. 
+ var allErrors []string + for _, digest := range imageJSON.RepoDigests { + sigstoreSelectors, err := p.sigstoreVerifier.Verify(ctx, digest) + if err != nil { + p.log.Warn("Error verifying sigstore image signature", telemetry.ImageID, digest, telemetry.Error, err) + allErrors = append(allErrors, fmt.Sprintf("%s %s: %v", telemetry.ImageID, digest, err)) + continue + } + selectors = append(selectors, sigstoreSelectors...) + verified = true + break + } + + if !verified { + return nil, fmt.Errorf("sigstore signature verification failed for image %s: %v", imageName, fmt.Sprintf("errors: %s", strings.Join(allErrors, "; "))) + } + } + return &workloadattestorv1.AttestResponse{ - SelectorValues: getSelectorValuesFromConfig(container.Config), + SelectorValues: selectors, }, nil } @@ -118,7 +164,7 @@ func getSelectorValuesFromConfig(cfg *container.Config) []string { return selectorValues } -func (p *Plugin) Configure(_ context.Context, req *configv1.ConfigureRequest) (*configv1.ConfigureResponse, error) { +func (p *Plugin) Configure(ctx context.Context, req *configv1.ConfigureRequest) (*configv1.ConfigureResponse, error) { var err error config := &dockerPluginConfig{} if err = hcl.Decode(config, req.HclConfiguration); err != nil { @@ -157,9 +203,21 @@ func (p *Plugin) Configure(_ context.Context, req *configv1.ConfigureRequest) (* return nil, err } + var sigstoreVerifier sigstore.Verifier + if config.Experimental.Sigstore != nil { + cfg := sigstore.NewConfigFromHCL(config.Experimental.Sigstore, p.log) + verifier := sigstore.NewVerifier(cfg) + err = verifier.Init(ctx) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "error initializing sigstore verifier: %v", err) + } + sigstoreVerifier = verifier + } + p.mtx.Lock() defer p.mtx.Unlock() p.docker = docker p.c = containerHelper + p.sigstoreVerifier = sigstoreVerifier return &configv1.ConfigureResponse{}, nil } diff --git a/pkg/agent/plugin/workloadattestor/docker/docker_posix.go 
b/pkg/agent/plugin/workloadattestor/docker/docker_posix.go index f1dc803c823..19e24ff29b8 100644 --- a/pkg/agent/plugin/workloadattestor/docker/docker_posix.go +++ b/pkg/agent/plugin/workloadattestor/docker/docker_posix.go @@ -8,7 +8,6 @@ import ( "io" "os" "path/filepath" - "regexp" "github.com/gogo/status" "github.com/hashicorp/go-hclog" @@ -40,24 +39,21 @@ type OSConfig struct { } func createHelper(c *dockerPluginConfig, log hclog.Logger) (*containerHelper, error) { - var containerIDFinder cgroup.ContainerIDFinder + useNewContainerLocator := c.UseNewContainerLocator == nil || *c.UseNewContainerLocator - switch { - case c.UseNewContainerLocator != nil && *c.UseNewContainerLocator: - if len(c.ContainerIDCGroupMatchers) > 0 { - return nil, status.Error(codes.InvalidArgument, "the new container locator and custom cgroup matchers cannot both be used") + var containerIDFinder cgroup.ContainerIDFinder + if len(c.ContainerIDCGroupMatchers) > 0 { + if useNewContainerLocator { + return nil, status.Error(codes.InvalidArgument, "the new container locator and custom cgroup matchers cannot both be used; please open an issue if the new container locator fails to locate workload containers in your environment; to continue using custom matchers set use_new_container_locator=false") } - log.Info("Using the new container locator") - case len(c.ContainerIDCGroupMatchers) > 0: - log.Warn("Using the legacy container locator with custom cgroup matchers. The new locator will be enabled by default in a future release. Consider using it now by setting `use_new_container_locator=true`.") + log.Warn("Using the legacy container locator with custom cgroup matchers. This feature will be removed in a future release.") var err error containerIDFinder, err = cgroup.NewContainerIDFinder(c.ContainerIDCGroupMatchers) if err != nil { return nil, err } - default: - log.Warn("Using the legacy container locator. The new locator will be enabled by default in a future release. 
Consider using it now by setting `use_new_container_locator=true`.") - containerIDFinder = &defaultContainerIDFinder{} + } else { + log.Info("Using the new container locator") } rootDir := c.rootDir @@ -143,26 +139,3 @@ func getContainerIDFromCGroups(finder cgroup.ContainerIDFinder, cgroups []cgroup return containerID, nil } } - -type defaultContainerIDFinder struct{} - -// FindContainerID returns the container ID in the given cgroup path. The cgroup -// path must have the whole word "docker" at some point in the path followed -// at some point by a 64 hex-character container ID. If the cgroup path does -// not match the above description, the method returns false. -func (f *defaultContainerIDFinder) FindContainerID(cgroupPath string) (string, bool) { - m := dockerCGroupRE.FindStringSubmatch(cgroupPath) - if m != nil { - return m[1], true - } - return "", false -} - -// dockerCGroupRE matches cgroup paths that have the following properties. -// 1) `\bdocker\b` the whole word docker -// 2) `.+` followed by one or more characters (which will start on a word boundary due to #1) -// 3) `\b([[:xdigit:]][64])\b` followed by a 64 hex-character container id on word boundary -// -// The "docker" prefix and 64-hex character container id can be anywhere in the path. The only -// requirement is that the docker prefix comes before the id. 
-var dockerCGroupRE = regexp.MustCompile(`\bdocker\b.+\b([[:xdigit:]]{64})\b`) diff --git a/pkg/agent/plugin/workloadattestor/docker/docker_posix_test.go b/pkg/agent/plugin/workloadattestor/docker/docker_posix_test.go index 8bdf01ff78f..0b8fa6efb73 100644 --- a/pkg/agent/plugin/workloadattestor/docker/docker_posix_test.go +++ b/pkg/agent/plugin/workloadattestor/docker/docker_posix_test.go @@ -14,8 +14,7 @@ import ( ) const ( - testCgroupEntries = "10:devices:/docker/6469646e742065787065637420616e796f6e6520746f20726561642074686973" - defaultPluginConfig = "use_new_container_locator = true" + testCgroupEntries = "10:devices:/docker/6469646e742065787065637420616e796f6e6520746f20726561642074686973" ) func TestContainerExtraction(t *testing.T) { @@ -29,26 +28,34 @@ func TestContainerExtraction(t *testing.T) { { desc: "no match", cgroups: testCgroupEntries, - cfg: `container_id_cgroup_matchers = [ -"/docker/*/", -] -`, + cfg: ` + use_new_container_locator = false + container_id_cgroup_matchers = [ + "/docker/*/", + ] + `, }, { desc: "one miss one match", cgroups: testCgroupEntries, - cfg: `container_id_cgroup_matchers = [ -"/docker/*/", -"/docker/" -]`, + cfg: ` + use_new_container_locator = false + container_id_cgroup_matchers = [ + "/docker/*/", + "/docker/" + ] + `, hasMatch: true, }, { desc: "no container id", cgroups: "10:cpu:/docker/", - cfg: `container_id_cgroup_matchers = [ -"/docker/" -]`, + cfg: ` + use_new_container_locator = false + container_id_cgroup_matchers = [ + "/docker/" + ] + `, expectErr: "a pattern matched, but no container id was found", }, { @@ -64,11 +71,12 @@ func TestContainerExtraction(t *testing.T) { { desc: "more than one id", cgroups: testCgroupEntries + "\n" + "4:devices:/system.slice/docker-41e4ab61d2860b0e1467de0da0a9c6068012761febec402dc04a5a94f32ea867.scope", - expectErr: "multiple container IDs found in cgroups", + expectErr: "multiple container IDs found", }, { - desc: "default finder does not match cgroup missing docker prefix", - 
cgroups: "4:devices:/system.slice/41e4ab61d2860b0e1467de0da0a9c6068012761febec402dc04a5a94f32ea867.scope", + desc: "default configuration matches cgroup missing docker prefix", + cgroups: "4:devices:/system.slice/6469646e742065787065637420616e796f6e6520746f20726561642074686973.scope", + hasMatch: true, }, } @@ -125,6 +133,7 @@ func TestDockerConfigPosix(t *testing.T) { require.NoError(t, err) p := newTestPlugin(t, withConfig(t, ` +use_new_container_locator = false docker_socket_path = "unix:///socket_path" docker_version = "1.20" container_id_cgroup_matchers = [ @@ -139,6 +148,7 @@ container_id_cgroup_matchers = [ t.Run("bad matcher", func(t *testing.T) { p := New() cfg := ` +use_new_container_locator = false container_id_cgroup_matchers = [ "/docker/", ]` diff --git a/pkg/agent/plugin/workloadattestor/docker/docker_test.go b/pkg/agent/plugin/workloadattestor/docker/docker_test.go index 658a28d74db..f8cabf67175 100644 --- a/pkg/agent/plugin/workloadattestor/docker/docker_test.go +++ b/pkg/agent/plugin/workloadattestor/docker/docker_test.go @@ -3,6 +3,7 @@ package docker import ( "context" "errors" + "fmt" "sort" "testing" "time" @@ -10,16 +11,20 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" dockerclient "github.com/docker/docker/client" + "github.com/hashicorp/go-hclog" + "github.com/spiffe/spire/pkg/agent/common/sigstore" "github.com/spiffe/spire/pkg/agent/plugin/workloadattestor" "github.com/spiffe/spire/test/clock" "github.com/spiffe/spire/test/plugintest" "github.com/spiffe/spire/test/spiretest" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" ) const ( testContainerID = "6469646e742065787065637420616e796f6e6520746f20726561642074686973" + testImageID = "test-image-id" ) var disabledRetryer = &retryer{disabled: true} @@ -150,15 +155,36 @@ func TestDockerErrorContextCancel(t *testing.T) { func TestDockerConfig(t *testing.T) { for _, tt := range []struct { 
- name string - expectCode codes.Code - expectMsg string - config string + name string + expectCode codes.Code + expectMsg string + config string + sigstoreConfigured bool }{ { name: "success configuration", config: `docker_version = "/123/"`, }, + { + name: "sigstore configuration", + config: ` + experimental { + sigstore { + allowed_identities = { + "test-issuer-1" = ["*@example.com", "subject@otherdomain.com"] + "test-issuer-2" = ["domain/ci.yaml@refs/tags/*"] + } + skipped_images = ["registry/image@sha256:examplehash"] + rekor_url = "https://test.dev" + ignore_sct = true + ignore_tlog = true + ignore_attestations = true + registry_username = "user" + registry_password = "pass" + } + }`, + sigstoreConfigured: true, + }, { name: "bad hcl", config: ` @@ -185,6 +211,12 @@ invalid2 = "/no/"`, plugintest.CaptureConfigureError(&err)) spiretest.RequireGRPCStatusHasPrefix(t, err, tt.expectCode, tt.expectMsg) + + if tt.sigstoreConfigured { + assert.NotNil(t, p.sigstoreVerifier) + } else { + assert.Nil(t, p.sigstoreVerifier) + } }) } } @@ -194,10 +226,105 @@ func TestDockerConfigDefault(t *testing.T) { require.NotNil(t, p.docker) require.Equal(t, dockerclient.DefaultDockerHost, p.docker.(*dockerclient.Client).DaemonHost()) - require.Equal(t, "1.46", p.docker.(*dockerclient.Client).ClientVersion()) + require.Equal(t, "1.47", p.docker.(*dockerclient.Client).ClientVersion()) verifyConfigDefault(t, p.c) } +func TestNewConfigFromHCL(t *testing.T) { + cases := []struct { + name string + hcl *sigstore.HCLConfig + want *sigstore.Config + }{ + { + name: "complete sigstore configuration", + hcl: &sigstore.HCLConfig{ + AllowedIdentities: map[string][]string{ + "test-issuer-1": {"*@example.com", "subject@otherdomain.com"}, + "test-issuer-2": {"domain/ci.yaml@refs/tags/*"}, + }, + SkippedImages: []string{"registry/image@sha256:examplehash"}, + RekorURL: strPtr("https://test.dev"), + IgnoreSCT: boolPtr(true), + IgnoreTlog: boolPtr(true), + IgnoreAttestations: boolPtr(true), + 
RegistryCredentials: map[string]*sigstore.RegistryCredential{ + "registry": { + Username: "user", + Password: "pass", + }, + }, + }, + want: &sigstore.Config{ + AllowedIdentities: map[string][]string{ + "test-issuer-1": {"*@example.com", "subject@otherdomain.com"}, + "test-issuer-2": {"domain/ci.yaml@refs/tags/*"}, + }, + SkippedImages: map[string]struct{}{"registry/image@sha256:examplehash": {}}, + RekorURL: "https://test.dev", + IgnoreSCT: true, + IgnoreTlog: true, + IgnoreAttestations: true, + RegistryCredentials: map[string]*sigstore.RegistryCredential{ + "registry": { + Username: "user", + Password: "pass", + }, + }, + Logger: hclog.NewNullLogger(), + }, + }, + { + name: "empty sigstore configuration", + hcl: &sigstore.HCLConfig{}, + want: &sigstore.Config{ + RekorURL: "", + IgnoreSCT: false, + IgnoreTlog: false, + IgnoreAttestations: false, + AllowedIdentities: map[string][]string{}, + SkippedImages: map[string]struct{}{}, + Logger: hclog.NewNullLogger(), + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + log := hclog.NewNullLogger() + cfg := sigstore.NewConfigFromHCL(tc.hcl, log) + require.Equal(t, tc.want, cfg) + }) + } +} + +func TestSigstoreVerifier(t *testing.T) { + fakeVerifier := &fakeSigstoreVerifier{ + expectedImageID: testImageID, + selectors: []string{"sigstore:selector"}, + err: nil, + } + + fakeDocker := fakeContainer{ + Labels: map[string]string{"label": "value"}, + Image: testImageID, + Env: []string{"VAR=val"}, + } + + p := newTestPlugin(t, withDocker(fakeDocker), withDefaultDataOpt(t), withSigstoreVerifier(fakeVerifier)) + + // Run attestation + selectors, err := doAttest(t, p) + require.NoError(t, err) + expectedSelectors := []string{ + "env:VAR=val", + "label:label:value", + fmt.Sprintf("image_id:%s", testImageID), + "sigstore:selector", + } + require.ElementsMatch(t, expectedSelectors, selectors) +} + func doAttest(t *testing.T, p *Plugin) ([]string, error) { return 
doAttestWithContext(context.Background(), t, p) } @@ -246,9 +373,15 @@ func withDisabledRetryer() testPluginOpt { } } +func withSigstoreVerifier(v sigstore.Verifier) testPluginOpt { + return func(p *Plugin) { + p.sigstoreVerifier = v + } +} + func newTestPlugin(t *testing.T, opts ...testPluginOpt) *Plugin { p := New() - err := doConfigure(t, p, defaultPluginConfig) + err := doConfigure(t, p, "") require.NoError(t, err) for _, o := range opts { @@ -263,6 +396,10 @@ func (dockerError) ContainerInspect(context.Context, string) (types.ContainerJSO return types.ContainerJSON{}, errors.New("docker error") } +func (dockerError) ImageInspectWithRaw(context.Context, string) (types.ImageInspect, []byte, error) { + return types.ImageInspect{}, nil, errors.New("docker error") +} + type fakeContainer container.Config func (f fakeContainer) ContainerInspect(_ context.Context, containerID string) (types.ContainerJSON, error) { @@ -274,3 +411,28 @@ func (f fakeContainer) ContainerInspect(_ context.Context, containerID string) ( Config: &config, }, nil } + +func (f fakeContainer) ImageInspectWithRaw(_ context.Context, imageName string) (types.ImageInspect, []byte, error) { + return types.ImageInspect{ID: imageName, RepoDigests: []string{testImageID}}, nil, nil +} + +type fakeSigstoreVerifier struct { + expectedImageID string + selectors []string + err error +} + +func (f *fakeSigstoreVerifier) Verify(_ context.Context, imageID string) ([]string, error) { + if imageID != f.expectedImageID { + return nil, fmt.Errorf("unexpected image ID: %s", imageID) + } + return f.selectors, f.err +} + +func strPtr(s string) *string { + return &s +} + +func boolPtr(b bool) *bool { + return &b +} diff --git a/pkg/agent/plugin/workloadattestor/docker/docker_windows_test.go b/pkg/agent/plugin/workloadattestor/docker/docker_windows_test.go index b9d06438240..3f71a27829b 100644 --- a/pkg/agent/plugin/workloadattestor/docker/docker_windows_test.go +++ 
b/pkg/agent/plugin/workloadattestor/docker/docker_windows_test.go @@ -12,10 +12,6 @@ import ( "google.golang.org/grpc/codes" ) -const ( - defaultPluginConfig = "" -) - func TestFailToGetContainerID(t *testing.T) { h := &fakeProcessHelper{ err: errors.New("oh no"), diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s.go b/pkg/agent/plugin/workloadattestor/k8s/k8s.go index f9ef212a12a..8432c5d8b53 100644 --- a/pkg/agent/plugin/workloadattestor/k8s/k8s.go +++ b/pkg/agent/plugin/workloadattestor/k8s/k8s.go @@ -22,10 +22,12 @@ import ( "github.com/hashicorp/hcl" workloadattestorv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/agent/workloadattestor/v1" configv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/service/common/config/v1" + "github.com/spiffe/spire/pkg/agent/common/sigstore" "github.com/spiffe/spire/pkg/common/catalog" "github.com/spiffe/spire/pkg/common/pemutil" "github.com/spiffe/spire/pkg/common/telemetry" "github.com/valyala/fastjson" + "golang.org/x/sync/singleflight" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" corev1 "k8s.io/api/core/v1" @@ -41,7 +43,6 @@ const ( defaultTokenPath = "/var/run/secrets/kubernetes.io/serviceaccount/token" //nolint: gosec // false positive defaultNodeNameEnv = "MY_NODE_NAME" defaultReloadInterval = time.Minute - maximumAmountCache = 10 ) func BuiltIn() catalog.BuiltIn { @@ -123,8 +124,8 @@ type HCLConfig struct { DisableContainerSelectors bool `hcl:"disable_container_selectors"` // UseNewContainerLocator, if true, uses the new container locator - // mechanism instead of the legacy cgroup matchers. Defaults to false if - // unset. This will default to true in a future release. + // mechanism instead of the legacy cgroup matchers. Defaults to true if + // unset. This configurable will be removed in a future release. 
UseNewContainerLocator *bool `hcl:"use_new_container_locator"` // VerboseContainerLocatorLogs, if true, dumps extra information to the log @@ -132,27 +133,12 @@ type HCLConfig struct { VerboseContainerLocatorLogs bool `hcl:"verbose_container_locator_logs"` // Experimental enables experimental features. - Experimental *ExperimentalK8SConfig `hcl:"experimental,omitempty"` + Experimental experimentalK8SConfig `hcl:"experimental,omitempty"` } -type ExperimentalK8SConfig struct { +type experimentalK8SConfig struct { // Sigstore contains sigstore specific configs. - Sigstore *SigstoreHCLConfig `hcl:"sigstore,omitempty"` -} - -// SigstoreHCLConfig holds the sigstore configuration parsed from HCL -type SigstoreHCLConfig struct { - // EnforceSCT is the parameter to be set as false in case of a private deployment not using the public CT - EnforceSCT *bool `hcl:"enforce_sct, omitempty"` - - // RekorURL is the URL for the rekor server to use to verify signatures and public keys - RekorURL *string `hcl:"rekor_url,omitempty"` - - // SkippedImages is a list of images that should skip sigstore verification - SkippedImages []string `hcl:"skip_signature_verification_image_list"` - - // AllowedSubjects is a list of subjects that should be allowed after verification - AllowedSubjects map[string][]string `hcl:"allowed_subjects_list"` + Sigstore *sigstore.HCLConfig `hcl:"sigstore,omitempty"` } // k8sConfig holds the configuration distilled from HCL @@ -177,7 +163,6 @@ type k8sConfig struct { type ContainerHelper interface { Configure(config *HCLConfig, log hclog.Logger) error - GetOSSelectors(ctx context.Context, log hclog.Logger, containerStatus *corev1.ContainerStatus) ([]string, error) GetPodUIDAndContainerID(pID int32, log hclog.Logger) (types.UID, string, error) } @@ -190,24 +175,43 @@ type Plugin struct { rootDir string getenv func(string) string - mu sync.RWMutex - config *k8sConfig - containerHelper ContainerHelper + mu sync.RWMutex + config *k8sConfig + containerHelper 
ContainerHelper + sigstoreVerifier sigstore.Verifier + + cachedPodList map[string]*fastjson.Value + singleflight singleflight.Group + + shutdownCtx context.Context + shutdownCtxCancel context.CancelFunc + shutdownWG sync.WaitGroup } func New() *Plugin { + ctx, cancel := context.WithCancel(context.Background()) + return &Plugin{ - clock: clock.New(), - getenv: os.Getenv, + clock: clock.New(), + getenv: os.Getenv, + shutdownCtx: ctx, + shutdownCtxCancel: cancel, } } +func (p *Plugin) Close() error { + p.shutdownCtxCancel() + p.shutdownWG.Wait() + + return nil +} + func (p *Plugin) SetLogger(log hclog.Logger) { p.log = log } func (p *Plugin) Attest(ctx context.Context, req *workloadattestorv1.AttestRequest) (*workloadattestorv1.AttestResponse, error) { - config, containerHelper, err := p.getConfig() + config, containerHelper, sigstoreVerifier, err := p.getConfig() if err != nil { return nil, err } @@ -234,22 +238,15 @@ func (p *Plugin) Attest(ctx context.Context, req *workloadattestorv1.AttestReque for attempt := 1; ; attempt++ { log = log.With(telemetry.Attempt, attempt) - podListBytes, err := config.Client.GetPodList() + podList, err := p.getPodList(ctx, config.Client) if err != nil { return nil, err } - var parser fastjson.Parser - podList, err := parser.ParseBytes(podListBytes) - if err != nil { - return nil, status.Errorf(codes.Internal, "unable to parse kubelet response: %v", err) - } - var attestResponse *workloadattestorv1.AttestResponse - for _, podValue := range podList.GetArray("items") { + for podKey, podValue := range podList { if podKnown { - uidBytes := podValue.Get("metadata", "uid").GetStringBytes() - if string(uidBytes) != string(podUID) { + if podKey != string(podUID) { // The pod holding the container is known. Skip unrelated pods. continue } @@ -275,14 +272,15 @@ func (p *Plugin) Attest(ctx context.Context, req *workloadattestorv1.AttestReque selectorValues = append(selectorValues, getSelectorValuesFromPodInfo(pod)...) 
if !config.DisableContainerSelectors { selectorValues = append(selectorValues, getSelectorValuesFromWorkloadContainerStatus(containerStatus)...) + } - osSelector, err := containerHelper.GetOSSelectors(ctx, log, containerStatus) - switch { - case err != nil: - return nil, err - case len(osSelector) > 0: - selectorValues = append(selectorValues, osSelector...) + if sigstoreVerifier != nil { + log.Debug("Attempting to verify sigstore image signature", "image", containerStatus.Image) + sigstoreSelectors, err := p.sigstoreVerifier.Verify(ctx, containerStatus.ImageID) + if err != nil { + return nil, status.Errorf(codes.Internal, "error verifying sigstore image signature for imageID %s: %v", containerStatus.ImageID, err) } + selectorValues = append(selectorValues, sigstoreSelectors...) } case podKnown && config.DisableContainerSelectors: @@ -322,7 +320,7 @@ func (p *Plugin) Attest(ctx context.Context, req *workloadattestorv1.AttestReque } } -func (p *Plugin) Configure(_ context.Context, req *configv1.ConfigureRequest) (resp *configv1.ConfigureResponse, err error) { +func (p *Plugin) Configure(ctx context.Context, req *configv1.ConfigureRequest) (resp *configv1.ConfigureResponse, err error) { // Parse HCL config payload into config struct config := new(HCLConfig) if err := hcl.Decode(config, req.HclConfiguration); err != nil { @@ -407,29 +405,69 @@ func (p *Plugin) Configure(_ context.Context, req *configv1.ConfigureRequest) (r return nil, err } + var sigstoreVerifier sigstore.Verifier + if config.Experimental.Sigstore != nil { + cfg := sigstore.NewConfigFromHCL(config.Experimental.Sigstore, p.log) + verifier := sigstore.NewVerifier(cfg) + err = verifier.Init(ctx) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "error initializing sigstore verifier: %v", err) + } + sigstoreVerifier = verifier + } + // Set the config - p.setConfig(c, containerHelper) + p.setConfig(c, containerHelper, sigstoreVerifier) return &configv1.ConfigureResponse{}, nil } -func (p 
*Plugin) setConfig(config *k8sConfig, containerHelper ContainerHelper) { +func (p *Plugin) setConfig(config *k8sConfig, containerHelper ContainerHelper, sigstoreVerifier sigstore.Verifier) { p.mu.Lock() defer p.mu.Unlock() p.config = config p.containerHelper = containerHelper + p.sigstoreVerifier = sigstoreVerifier } -func (p *Plugin) getConfig() (*k8sConfig, ContainerHelper, error) { +func (p *Plugin) getConfig() (*k8sConfig, ContainerHelper, sigstore.Verifier, error) { p.mu.RLock() defer p.mu.RUnlock() if p.config == nil { - return nil, nil, status.Error(codes.FailedPrecondition, "not configured") + return nil, nil, nil, status.Error(codes.FailedPrecondition, "not configured") } if err := p.reloadKubeletClient(p.config); err != nil { p.log.Warn("Unable to load kubelet client", "err", err) } - return p.config, p.containerHelper, nil + return p.config, p.containerHelper, p.sigstoreVerifier, nil +} + +func (p *Plugin) setPodListCache(podList map[string]*fastjson.Value, expires time.Duration) { + p.mu.Lock() + defer p.mu.Unlock() + + p.cachedPodList = podList + + p.shutdownWG.Add(1) + go func() { + defer p.shutdownWG.Done() + + select { + case <-p.clock.After(expires): + case <-p.shutdownCtx.Done(): + } + + p.mu.Lock() + defer p.mu.Unlock() + p.cachedPodList = nil + }() +} + +func (p *Plugin) getPodListCache() map[string]*fastjson.Value { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.cachedPodList } func (p *Plugin) setContainerHelper(c ContainerHelper) { @@ -607,16 +645,67 @@ func (p *Plugin) getNodeName(name string, env string) string { } } +func (p *Plugin) getPodList(ctx context.Context, client *kubeletClient) (map[string]*fastjson.Value, error) { + result := p.getPodListCache() + if result != nil { + return result, nil + } + + podList, err, _ := p.singleflight.Do("podList", func() (interface{}, error) { + result := p.getPodListCache() + if result != nil { + return result, nil + } + + podListBytes, err := client.GetPodList(ctx) + if err != nil { + return 
nil, err + } + + var parser fastjson.Parser + podList, err := parser.ParseBytes(podListBytes) + if err != nil { + return nil, status.Errorf(codes.Internal, "unable to parse kubelet response: %v", err) + } + + items := podList.GetArray("items") + result = make(map[string]*fastjson.Value, len(items)) + + for _, podValue := range items { + uid := string(podValue.Get("metadata", "uid").GetStringBytes()) + + if uid == "" { + p.log.Warn("Pod has no UID", "pod", podValue) + continue + } + + result[uid] = podValue + } + + p.setPodListCache(result, p.config.PollRetryInterval/2) + + return result, nil + }) + if err != nil { + return nil, err + } + + return podList.(map[string]*fastjson.Value), nil +} + type kubeletClient struct { Transport *http.Transport URL url.URL Token string } -func (c *kubeletClient) GetPodList() ([]byte, error) { +func (c *kubeletClient) GetPodList(ctx context.Context) ([]byte, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + url := c.URL url.Path = "/pods" - req, err := http.NewRequest("GET", url.String(), nil) + req, err := http.NewRequestWithContext(ctx, "GET", url.String(), nil) if err != nil { return nil, status.Errorf(codes.Internal, "unable to create request: %v", err) } diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go index 82fab2a23d7..4f6b9968333 100644 --- a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go +++ b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go @@ -3,7 +3,6 @@ package k8s import ( - "context" "io" "log" "os" @@ -14,12 +13,9 @@ import ( "github.com/hashicorp/go-hclog" "github.com/spiffe/spire/pkg/agent/common/cgroups" - "github.com/spiffe/spire/pkg/agent/plugin/workloadattestor/k8s/sigstore" "github.com/spiffe/spire/pkg/common/containerinfo" - "github.com/spiffe/spire/pkg/common/telemetry" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" ) @@ 
-43,50 +39,22 @@ func createHelper(c *Plugin) ContainerHelper { type containerHelper struct { rootDir string - sigstoreClient sigstore.Sigstore useNewContainerLocator bool verboseContainerLocatorLogs bool } func (h *containerHelper) Configure(config *HCLConfig, log hclog.Logger) error { - // set experimental flags - if config.Experimental != nil && config.Experimental.Sigstore != nil { - if h.sigstoreClient == nil { - newcache := sigstore.NewCache(maximumAmountCache) - h.sigstoreClient = sigstore.New(newcache, nil) - } - - if err := configureSigstoreClient(h.sigstoreClient, config.Experimental.Sigstore, log); err != nil { - return err - } - } - h.verboseContainerLocatorLogs = config.VerboseContainerLocatorLogs - h.useNewContainerLocator = config.UseNewContainerLocator != nil && *config.UseNewContainerLocator + h.useNewContainerLocator = config.UseNewContainerLocator == nil || *config.UseNewContainerLocator if h.useNewContainerLocator { log.Info("Using the new container locator") } else { - log.Warn("Using the legacy container locator. The new locator will be enabled by default in a future release. Consider using it now by setting `use_new_container_locator=true`.") + log.Warn("Using the legacy container locator. This option will removed in a future release.") } return nil } -func (h *containerHelper) GetOSSelectors(ctx context.Context, log hclog.Logger, containerStatus *corev1.ContainerStatus) ([]string, error) { - var selectors []string - if h.sigstoreClient != nil { - log.Debug("Attempting to get signature info for container", telemetry.ContainerName, containerStatus.Name) - sigstoreSelectors, err := h.sigstoreClient.AttestContainerSignatures(ctx, containerStatus) - if err != nil { - log.Error("Error retrieving signature payload", "error", err) - return nil, status.Errorf(codes.Internal, "error retrieving signature payload: %v", err) - } - selectors = append(selectors, sigstoreSelectors...) 
- } - - return selectors, nil -} - func (h *containerHelper) GetPodUIDAndContainerID(pID int32, log hclog.Logger) (types.UID, string, error) { if !h.useNewContainerLocator { cgroups, err := cgroups.GetCgroups(pID, dirFS(h.rootDir)) @@ -233,37 +201,6 @@ func canonicalizePodUID(uid string) types.UID { }, uid)) } -func configureSigstoreClient(client sigstore.Sigstore, c *SigstoreHCLConfig, log hclog.Logger) error { - // Rekor URL is required - if c.RekorURL == nil { - return status.Errorf(codes.InvalidArgument, "missing Rekor URL") - } - if err := client.SetRekorURL(*c.RekorURL); err != nil { - return status.Errorf(codes.InvalidArgument, "failed to set Rekor URL: %v", err) - } - - // Configure sigstore settings - enforceSCT := true - if c.EnforceSCT != nil { - enforceSCT = *c.EnforceSCT - } - - client.SetEnforceSCT(enforceSCT) - - client.ClearSkipList() - if c.SkippedImages != nil { - client.AddSkippedImages(c.SkippedImages) - } - client.SetLogger(log) - client.ClearAllowedSubjects() - for issuer, subjects := range c.AllowedSubjects { - for _, subject := range subjects { - client.AddAllowedSubject(issuer, subject) - } - } - return nil -} - type dirFS string func (d dirFS) Open(p string) (io.ReadCloser, error) { diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix_test.go b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix_test.go index 5926037b81e..0acb2019cb7 100644 --- a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix_test.go +++ b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix_test.go @@ -3,26 +3,18 @@ package k8s import ( - "bytes" "context" - "errors" "fmt" "os" "path/filepath" "testing" - "github.com/hashicorp/go-hclog" - "github.com/sigstore/cosign/v2/pkg/oci" "github.com/spiffe/spire/pkg/agent/common/cgroups" "github.com/spiffe/spire/pkg/agent/plugin/workloadattestor" - "github.com/spiffe/spire/pkg/agent/plugin/workloadattestor/k8s/sigstore" "github.com/spiffe/spire/proto/spire/common" - "github.com/spiffe/spire/test/plugintest" 
"github.com/spiffe/spire/test/spiretest" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" ) @@ -103,51 +95,6 @@ var ( {Type: "k8s", Value: "pod-uid:d488cae9-b2a0-11e7-9350-020968147796"}, {Type: "k8s", Value: "sa:flannel"}, } - - testSigstoreSelectors = []*common.Selector{ - {Type: "k8s", Value: "container-image:docker-pullable://localhost/spiffe/blog@sha256:0cfdaced91cb46dd7af48309799a3c351e4ca2d5e1ee9737ca0cbd932cb79898"}, - {Type: "k8s", Value: "container-image:localhost/spiffe/blog:latest"}, - {Type: "k8s", Value: "container-name:blog"}, - {Type: "k8s", Value: "docker://9bca8d63d5fa610783847915bcff0ecac1273e5b4bed3f6fa1b07350e0135961:image-signature-subject:sigstore-subject"}, - {Type: "k8s", Value: "node-name:k8s-node-1"}, - {Type: "k8s", Value: "ns:default"}, - {Type: "k8s", Value: "pod-image-count:2"}, - {Type: "k8s", Value: "pod-image:docker-pullable://localhost/spiffe/blog@sha256:0cfdaced91cb46dd7af48309799a3c351e4ca2d5e1ee9737ca0cbd932cb79898"}, - {Type: "k8s", Value: "pod-image:docker-pullable://localhost/spiffe/ghostunnel@sha256:b2fc20676c92a433b9a91f3f4535faddec0c2c3613849ac12f02c1d5cfcd4c3a"}, - {Type: "k8s", Value: "pod-image:localhost/spiffe/blog:latest"}, - {Type: "k8s", Value: "pod-image:localhost/spiffe/ghostunnel:latest"}, - {Type: "k8s", Value: "pod-init-image-count:0"}, - {Type: "k8s", Value: "pod-label:k8s-app:blog"}, - {Type: "k8s", Value: "pod-label:version:v0"}, - {Type: "k8s", Value: "pod-name:blog-24ck7"}, - {Type: "k8s", Value: "pod-owner-uid:ReplicationController:2c401175-b29f-11e7-9350-020968147796"}, - {Type: "k8s", Value: "pod-owner:ReplicationController:blog"}, - {Type: "k8s", Value: "pod-uid:2c48913c-b29f-11e7-9350-020968147796"}, - {Type: "k8s", Value: "sa:default"}, - {Type: "k8s", Value: "sigstore-validation:passed"}, - } - - testSigstoreSkippedSelectors = []*common.Selector{ - {Type: "k8s", Value: 
"container-image:docker-pullable://localhost/spiffe/blog@sha256:0cfdaced91cb46dd7af48309799a3c351e4ca2d5e1ee9737ca0cbd932cb79898"}, - {Type: "k8s", Value: "container-image:localhost/spiffe/blog:latest"}, - {Type: "k8s", Value: "container-name:blog"}, - {Type: "k8s", Value: "node-name:k8s-node-1"}, - {Type: "k8s", Value: "ns:default"}, - {Type: "k8s", Value: "pod-image-count:2"}, - {Type: "k8s", Value: "pod-image:docker-pullable://localhost/spiffe/blog@sha256:0cfdaced91cb46dd7af48309799a3c351e4ca2d5e1ee9737ca0cbd932cb79898"}, - {Type: "k8s", Value: "pod-image:docker-pullable://localhost/spiffe/ghostunnel@sha256:b2fc20676c92a433b9a91f3f4535faddec0c2c3613849ac12f02c1d5cfcd4c3a"}, - {Type: "k8s", Value: "pod-image:localhost/spiffe/blog:latest"}, - {Type: "k8s", Value: "pod-image:localhost/spiffe/ghostunnel:latest"}, - {Type: "k8s", Value: "pod-init-image-count:0"}, - {Type: "k8s", Value: "pod-label:k8s-app:blog"}, - {Type: "k8s", Value: "pod-label:version:v0"}, - {Type: "k8s", Value: "pod-name:blog-24ck7"}, - {Type: "k8s", Value: "pod-owner-uid:ReplicationController:2c401175-b29f-11e7-9350-020968147796"}, - {Type: "k8s", Value: "pod-owner:ReplicationController:blog"}, - {Type: "k8s", Value: "pod-uid:2c48913c-b29f-11e7-9350-020968147796"}, - {Type: "k8s", Value: "sa:default"}, - {Type: "k8s", Value: "sigstore-validation:passed"}, - } ) func (s *Suite) TestAttestWithInitPidInPod() { @@ -212,176 +159,6 @@ func (s *Suite) TestAttestAgainstNodeOverride() { s.Require().Empty(selectors) } -func (s *Suite) TestFailedToCreateHelperFromConfigure() { - t := s.T() - p := s.newPlugin() - - var err error - plugintest.Load(t, builtin(p), nil, - plugintest.Configure(` - experimental = { - sigstore = { - rekor_url = "inva{{{lid}" - } - } - `), - plugintest.CaptureConfigureError(&err)) - spiretest.RequireGRPCStatus(t, err, codes.InvalidArgument, "failed to set Rekor URL: host is required on rekor URL") -} - -func (s *Suite) TestHelperConfigure() { - rekorURL := 
"https://rekor.example.com/" - invalidURL := "invalid url" - for _, tt := range []struct { - name string - config *HCLConfig - errCode codes.Code - errMsg string - clientErr error - - expectSkippedImages map[string]struct{} - expectRekoURL string - expectSubjects map[string]map[string]struct{} - }{ - { - name: "sigstore is configured", - config: &HCLConfig{ - Experimental: &ExperimentalK8SConfig{ - Sigstore: &SigstoreHCLConfig{ - RekorURL: &rekorURL, - SkippedImages: []string{"sha:image1hash", "sha:image2hash"}, - AllowedSubjects: map[string][]string{"issuer": {"spirex@example.com", "spirex1@example.com"}}, - }, - }, - }, - expectRekoURL: rekorURL, - expectSkippedImages: map[string]struct{}{ - "sha:image1hash": {}, - "sha:image2hash": {}, - }, - expectSubjects: map[string]map[string]struct{}{ - "issuer": { - "spirex@example.com": {}, - "spirex1@example.com": {}, - }, - }, - }, - { - name: "only reko url", - config: &HCLConfig{ - Experimental: &ExperimentalK8SConfig{ - Sigstore: &SigstoreHCLConfig{ - RekorURL: &rekorURL, - }, - }, - }, - expectRekoURL: rekorURL, - }, - { - name: "missing url, use default", - config: &HCLConfig{ - Experimental: &ExperimentalK8SConfig{ - Sigstore: &SigstoreHCLConfig{ - RekorURL: nil, - }, - }, - }, - errCode: codes.InvalidArgument, - errMsg: "missing Rekor URL", - }, - { - name: "failed to set url", - config: &HCLConfig{ - Experimental: &ExperimentalK8SConfig{ - Sigstore: &SigstoreHCLConfig{ - RekorURL: &invalidURL, - }, - }, - }, - clientErr: errors.New("oh no"), - errCode: codes.InvalidArgument, - errMsg: "failed to set Rekor URL: oh no", - }, - } { - s.T().Run(tt.name, func(t *testing.T) { - fakeClient := &sigstoreMock{ - returnError: tt.clientErr, - } - h := &containerHelper{ - sigstoreClient: fakeClient, - } - - err := h.Configure(tt.config, hclog.NewNullLogger()) - - if tt.errMsg != "" { - spiretest.RequireGRPCStatus(t, err, tt.errCode, tt.errMsg) - return - } - - require.NoError(t, err) - require.NotNil(t, h.sigstoreClient) - - 
require.Equal(t, tt.expectSkippedImages, fakeClient.skippedImages) - require.Equal(t, tt.expectRekoURL, fakeClient.rekorURL) - require.Equal(t, tt.expectSubjects, fakeClient.allowedSubjects) - }) - } -} - -func (s *Suite) TestAttestWithSigstoreSignatures() { - s.startInsecureKubelet() - s.oc.fakeClient.selectors = []sigstore.SelectorsFromSignatures{ - { - Subject: "sigstore-subject", - }, - } - p := s.loadInsecurePlugin() - s.requireAttestSuccessWithPodAndSignature(p) -} - -func (s *Suite) TestAttestWithSigstoreSkippedImage() { - s.startInsecureKubelet() - // Skip the image - s.oc.fakeClient.rekorURL = "reo" - s.oc.fakeClient.skipSigs = true - s.oc.fakeClient.skippedSigSelectors = []string{"sigstore-validation:passed"} - p := s.loadInsecurePlugin() - s.requireAttestSuccessWithPodAndSkippedImage(p) -} - -func (s *Suite) TestAttestWithFailedSigstoreSignatures() { - s.startInsecureKubelet() - - p := s.newPlugin() - - v1 := new(workloadattestor.V1) - plugintest.Load(s.T(), builtin(p), v1, - plugintest.Configure(fmt.Sprintf(` - kubelet_read_only_port = %d - max_poll_attempts = 5 - poll_retry_interval = "1s" - `, s.kubeletPort())), - ) - - if cHelper := s.oc.getContainerHelper(p); cHelper != nil { - p.setContainerHelper(cHelper) - } - - buf := bytes.Buffer{} - newLog := hclog.New(&hclog.LoggerOptions{ - Output: &buf, - }) - - p.SetLogger(newLog) - - s.oc.fakeClient.returnError = errors.New("sigstore error 123") - - s.requireAttestFailureWithPod(v1, codes.Internal, "error retrieving signature payload: sigstore error 123") - logString := buf.String() - s.Require().Contains(logString, "Error retrieving signature payload") - s.Require().Contains(logString, "error=\"sigstore error 123\"") -} - func (s *Suite) TestAttestWhenContainerNotReadyButContainerSelectorsDisabled() { // This test will not pass on windows since obtaining the container ID is // currently required to identify the workload pod in that environment. 
@@ -651,145 +428,16 @@ func TestGetPodUIDAndContainerIDFromCGroupPath(t *testing.T) { } } -func (s *Suite) requireAttestSuccessWithPodAndSignature(p workloadattestor.WorkloadAttestor) { - s.addPodListResponse(podListFilePath) - s.addCgroupsResponse(cgPidInPodFilePath) - s.requireAttestSuccess(p, testSigstoreSelectors) -} - -func (s *Suite) requireAttestSuccessWithPodAndSkippedImage(p workloadattestor.WorkloadAttestor) { - s.addPodListResponse(podListFilePath) - s.addCgroupsResponse(cgPidInPodFilePath) - s.requireAttestSuccess(p, testSigstoreSkippedSelectors) -} - -func (s *Suite) requireAttestFailureWithPod(p workloadattestor.WorkloadAttestor, code codes.Code, contains string) { - s.addPodListResponse(podListFilePath) - s.addGetContainerResponsePidInPod() - s.requireAttestFailure(p, code, contains) -} - type osConfig struct { - fakeClient *sigstoreMock } func (o *osConfig) getContainerHelper(p *Plugin) ContainerHelper { return &containerHelper{ rootDir: p.rootDir, - sigstoreClient: o.fakeClient, useNewContainerLocator: true, } } func createOSConfig() *osConfig { - return &osConfig{ - fakeClient: &sigstoreMock{}, - } -} - -type sigstoreMock struct { - selectors []sigstore.SelectorsFromSignatures - - sigs []oci.Signature - skipSigs bool - skippedSigSelectors []string - returnError error - skippedImages map[string]struct{} - allowedSubjects map[string]map[string]struct{} - log hclog.Logger - - rekorURL string - enforceSCT bool -} - -// SetLogger implements sigstore.Sigstore -func (s *sigstoreMock) SetLogger(logger hclog.Logger) { - s.log = logger -} - -func (s *sigstoreMock) SetEnforceSCT(enforceSCT bool) { - s.enforceSCT = enforceSCT -} - -func (s *sigstoreMock) FetchImageSignatures(context.Context, string) ([]oci.Signature, error) { - if s.returnError != nil { - return nil, s.returnError - } - return s.sigs, nil -} - -func (s *sigstoreMock) SelectorValuesFromSignature(oci.Signature) (*sigstore.SelectorsFromSignatures, error) { - if len(s.selectors) != 0 { - return 
&s.selectors[0], nil - } - return nil, s.returnError -} - -func (s *sigstoreMock) ExtractSelectorsFromSignatures([]oci.Signature, string) []sigstore.SelectorsFromSignatures { - return s.selectors -} - -func (s *sigstoreMock) ShouldSkipImage(string) (bool, error) { - return s.skipSigs, s.returnError -} - -func (s *sigstoreMock) ClearSkipList() { - s.skippedImages = nil -} - -func (s *sigstoreMock) ClearAllowedSubjects() { - s.allowedSubjects = nil -} - -func (s *sigstoreMock) AttestContainerSignatures(_ context.Context, status *corev1.ContainerStatus) ([]string, error) { - if s.skipSigs { - return s.skippedSigSelectors, nil - } - if s.returnError != nil { - return nil, s.returnError - } - var selectorsString []string - for _, selector := range s.selectors { - if selector.Subject != "" { - selectorsString = append(selectorsString, fmt.Sprintf("%s:image-signature-subject:%s", status.ContainerID, selector.Subject)) - } - if selector.Content != "" { - selectorsString = append(selectorsString, fmt.Sprintf("%s:image-signature-content:%s", status.ContainerID, selector.Content)) - } - if selector.LogID != "" { - selectorsString = append(selectorsString, fmt.Sprintf("%s:image-signature-logid:%s", status.ContainerID, selector.LogID)) - } - if selector.IntegratedTime != "" { - selectorsString = append(selectorsString, fmt.Sprintf("%s:image-signature-integrated-time:%s", status.ContainerID, selector.IntegratedTime)) - } - selectorsString = append(selectorsString, "sigstore-validation:passed") - } - return selectorsString, nil -} - -func (s *sigstoreMock) SetRekorURL(url string) error { - if s.returnError != nil { - return s.returnError - } - s.rekorURL = url - return s.returnError -} - -func (s *sigstoreMock) AddAllowedSubject(issuer string, subject string) { - if s.allowedSubjects == nil { - s.allowedSubjects = make(map[string]map[string]struct{}) - } - if _, ok := s.allowedSubjects[issuer]; !ok { - s.allowedSubjects[issuer] = make(map[string]struct{}) - } - 
s.allowedSubjects[issuer][subject] = struct{}{} -} - -func (s *sigstoreMock) AddSkippedImages(images []string) { - if s.skippedImages == nil { - s.skippedImages = make(map[string]struct{}) - } - for _, imageID := range images { - s.skippedImages[imageID] = struct{}{} - } + return &osConfig{} } diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s_test.go b/pkg/agent/plugin/workloadattestor/k8s/k8s_test.go index 56abdfe782d..97d269334c1 100644 --- a/pkg/agent/plugin/workloadattestor/k8s/k8s_test.go +++ b/pkg/agent/plugin/workloadattestor/k8s/k8s_test.go @@ -14,9 +14,11 @@ import ( "net/http/httptest" "os" "path/filepath" + "sync" "testing" "time" + "github.com/spiffe/spire/pkg/agent/common/sigstore" "github.com/spiffe/spire/pkg/agent/plugin/workloadattestor" "github.com/spiffe/spire/pkg/common/pemutil" "github.com/spiffe/spire/pkg/common/util" @@ -53,7 +55,7 @@ AyIlgLJ/QQypapKXYPr4kLuFWFShRANCAARFfHk9kz/bGtZfcIhJpzvnSnKbSvuK FwOGLt+I3+9beT0vo+pn9Rq0squewFYe3aJbwpkyfP2xOovQCdm4PC8y -----END PRIVATE KEY----- `)) - + imageID = "docker-pullable://localhost/spiffe/blog@sha256:0cfdaced91cb46dd7af48309799a3c351e4ca2d5e1ee9737ca0cbd932cb79898" testPodSelectors = []*common.Selector{ {Type: "k8s", Value: "node-name:k8s-node-1"}, {Type: "k8s", Value: "ns:default"}, @@ -77,6 +79,10 @@ FwOGLt+I3+9beT0vo+pn9Rq0squewFYe3aJbwpkyfP2xOovQCdm4PC8y {Type: "k8s", Value: "container-name:blog"}, } testPodAndContainerSelectors = append(testPodSelectors, testContainerSelectors...) 
+ + sigstoreSelectors = []*common.Selector{ + {Type: "k8s", Value: "sigstore:selector"}, + } ) type attestResult struct { @@ -117,6 +123,7 @@ func (s *Suite) SetupTest() { } func (s *Suite) TearDownTest() { + s.clock.Add(time.Minute) s.setServer(nil) os.RemoveAll(s.dir) } @@ -139,6 +146,7 @@ func (s *Suite) TestAttestWithPidInPodAfterRetry() { resultCh := s.goAttest(p) + s.clock.WaitForAfter(time.Minute, "waiting for cache expiry timer") s.clock.WaitForAfter(time.Minute, "waiting for retry timer") s.clock.Add(time.Second) s.clock.WaitForAfter(time.Minute, "waiting for retry timer") @@ -167,6 +175,24 @@ func (s *Suite) TestAttestWithPidNotInPodCancelsEarly() { s.Require().Nil(selectors) } +func (s *Suite) TestAttestPodListCache() { + s.startInsecureKubelet() + p := s.loadInsecurePlugin() + + s.addPodListResponse(podListFilePath) + + s.requireAttestSuccessWithPod(p) + s.clock.WaitForAfter(time.Minute, "waiting for cache expiry timer") + + // The pod list is cached so we don't expect a request to kubelet + s.requireAttestSuccessWithPod(p) + + // The cache expires after the clock advances by at least half the retry interval + s.clock.Add(time.Minute) + s.addPodListResponse(podListFilePath) + s.requireAttestSuccessWithPod(p) +} + func (s *Suite) TestAttestWithPidNotInPodAfterRetry() { s.startInsecureKubelet() p := s.loadInsecurePlugin() @@ -179,6 +205,7 @@ func (s *Suite) TestAttestWithPidNotInPodAfterRetry() { resultCh := s.goAttest(p) + s.clock.WaitForAfter(time.Minute, "waiting for cache expiry timer") s.clock.WaitForAfter(time.Minute, "waiting for retry timer") s.clock.Add(time.Second) s.clock.WaitForAfter(time.Minute, "waiting for retry timer") @@ -271,6 +298,19 @@ func (s *Suite) TestAttestWhenContainerReadyButContainerSelectorsDisabled() { s.requireAttestSuccess(p, testPodSelectors) } +func (s *Suite) TestAttestWithSigstoreSelectors() { + s.startInsecureKubelet() + p := s.loadInsecurePluginWithSigstore() + + // Add the expected selectors from the Sigstore 
verifier + testPodAndContainerSelectors = append(testPodAndContainerSelectors, sigstoreSelectors...) + + s.addPodListResponse(podListFilePath) + s.addGetContainerResponsePidInPod() + + s.requireAttestSuccess(p, testPodAndContainerSelectors) +} + func (s *Suite) TestConfigure() { s.generateCerts("") @@ -291,6 +331,7 @@ func (s *Suite) TestConfigure() { MaxPollAttempts int PollRetryInterval time.Duration ReloadInterval time.Duration + SigstoreConfig *sigstore.Config } testCases := []struct { @@ -524,7 +565,7 @@ func (s *Suite) TestConfigure() { require.NotNil(t, testCase.config, "test case missing expected config") assert.NoError(t, err) - c, _, err := p.getConfig() + c, _, _, err := p.getConfig() require.NoError(t, err) switch { @@ -554,6 +595,77 @@ func (s *Suite) TestConfigure() { } } +func (s *Suite) TestConfigureWithSigstore() { + cases := []struct { + name string + hcl string + expectedError string + want *sigstore.Config + }{ + { + name: "complete sigstore configuration", + hcl: ` + skip_kubelet_verification = true + experimental { + sigstore { + allowed_identities = { + "test-issuer-1" = ["*@example.com", "subject@otherdomain.com"] + "test-issuer-2" = ["domain/ci.yaml@refs/tags/*"] + } + skipped_images = ["registry/image@sha256:examplehash"] + rekor_url = "https://test.dev" + ignore_sct = true + ignore_tlog = true + ignore_attestations = true + registry_credentials = { + "registry-1" = { username = "user1", password = "pass1" } + "registry-2" = { username = "user2", password = "pass2" } + } + } + }`, + expectedError: "", + }, + { + name: "empty sigstore configuration", + hcl: ` + skip_kubelet_verification = true + experimental { sigstore {} } + `, + expectedError: "", + }, + { + name: "invalid HCL", + hcl: ` + skip_kubelet_verification = true + experimental { sigstore = "invalid" } + `, + expectedError: "unable to decode configuration", + }, + } + + for _, tc := range cases { + tc := tc + s.T().Run(tc.name, func(t *testing.T) { + p := s.newPlugin() + + var 
err error + plugintest.Load(s.T(), builtin(p), nil, + plugintest.Configure(tc.hcl), + plugintest.CaptureConfigureError(&err)) + + if tc.expectedError != "" { + s.RequireGRPCStatusContains(err, codes.InvalidArgument, tc.expectedError) + return + } + require.NoError(t, err) + + _, _, sigstoreVerifier, err := p.getConfig() + require.NoError(t, err) + assert.NotNil(t, sigstoreVerifier) + }) + } +} + func (s *Suite) newPlugin() *Plugin { p := New() p.rootDir = s.dir @@ -599,6 +711,7 @@ func (s *Suite) kubeletPort() int { func (s *Suite) loadPlugin(configuration string) workloadattestor.WorkloadAttestor { v1 := new(workloadattestor.V1) p := s.newPlugin() + plugintest.Load(s.T(), builtin(p), v1, plugintest.Configure(configuration), ) @@ -606,6 +719,11 @@ func (s *Suite) loadPlugin(configuration string) workloadattestor.WorkloadAttest if cHelper := s.oc.getContainerHelper(p); cHelper != nil { p.setContainerHelper(cHelper) } + + // if sigstore is configured, override with fake + if p.sigstoreVerifier != nil { + p.sigstoreVerifier = newFakeSigstoreVerifier(map[string][]string{imageID: {"sigstore:selector"}}) + } return v1 } @@ -614,7 +732,6 @@ func (s *Suite) loadInsecurePlugin() workloadattestor.WorkloadAttestor { kubelet_read_only_port = %d max_poll_attempts = 5 poll_retry_interval = "1s" - use_new_container_locator = true `, s.kubeletPort())) } @@ -623,11 +740,22 @@ func (s *Suite) loadInsecurePluginWithExtra(extraConfig string) workloadattestor kubelet_read_only_port = %d max_poll_attempts = 5 poll_retry_interval = "1s" - use_new_container_locator = true %s `, s.kubeletPort(), extraConfig)) } +func (s *Suite) loadInsecurePluginWithSigstore() workloadattestor.WorkloadAttestor { + return s.loadPlugin(fmt.Sprintf(` + kubelet_read_only_port = %d + max_poll_attempts = 5 + poll_retry_interval = "1s" + experimental { + sigstore { + } + } + `, s.kubeletPort())) +} + func (s *Suite) startInsecureKubelet() { s.setServer(httptest.NewServer(http.HandlerFunc(s.serveHTTP))) } @@ 
-809,3 +937,26 @@ func (s *Suite) addPodListResponse(fixturePath string) { s.podList = append(s.podList, podList) } + +type fakeSigstoreVerifier struct { + mu sync.Mutex + + SigDetailsSets map[string][]string +} + +func newFakeSigstoreVerifier(selectors map[string][]string) *fakeSigstoreVerifier { + return &fakeSigstoreVerifier{ + SigDetailsSets: selectors, + } +} + +func (v *fakeSigstoreVerifier) Verify(_ context.Context, imageID string) ([]string, error) { + v.mu.Lock() + defer v.mu.Unlock() + + if selectors, found := v.SigDetailsSets[imageID]; found { + return selectors, nil + } + + return nil, fmt.Errorf("failed to verify signature for image %s", imageID) +} diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s_windows.go b/pkg/agent/plugin/workloadattestor/k8s/k8s_windows.go index 21f0299b4c3..2d55ea3b247 100644 --- a/pkg/agent/plugin/workloadattestor/k8s/k8s_windows.go +++ b/pkg/agent/plugin/workloadattestor/k8s/k8s_windows.go @@ -3,14 +3,12 @@ package k8s import ( - "context" "path/filepath" "github.com/hashicorp/go-hclog" "github.com/spiffe/spire/pkg/common/container/process" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" ) @@ -28,18 +26,10 @@ type containerHelper struct { ph process.Helper } -func (h *containerHelper) Configure(config *HCLConfig, _ hclog.Logger) error { - if config.Experimental != nil && config.Experimental.Sigstore != nil { - return status.Error(codes.InvalidArgument, "sigstore configuration is not supported on windows environment") - } +func (h *containerHelper) Configure(_ *HCLConfig, _ hclog.Logger) error { return nil } -func (h *containerHelper) GetOSSelectors(context.Context, hclog.Logger, *corev1.ContainerStatus) ([]string, error) { - // No additional selectors on windows - return nil, nil -} - func (h *containerHelper) GetPodUIDAndContainerID(pID int32, log hclog.Logger) (types.UID, string, error) { containerID, err := h.ph.GetContainerIDByProcess(pID, 
log) if err != nil { diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s_windows_test.go b/pkg/agent/plugin/workloadattestor/k8s/k8s_windows_test.go index 188ae00262e..90efb516289 100644 --- a/pkg/agent/plugin/workloadattestor/k8s/k8s_windows_test.go +++ b/pkg/agent/plugin/workloadattestor/k8s/k8s_windows_test.go @@ -8,7 +8,6 @@ import ( "testing" "github.com/hashicorp/go-hclog" - "github.com/spiffe/spire/test/plugintest" "github.com/spiffe/spire/test/spiretest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -68,23 +67,6 @@ func (s *Suite) addGetContainerResponsePidInPod() { } } -func (s *Suite) TestFailedToStartWhenUsingSigstore() { - t := s.T() - p := s.newPlugin() - - var err error - plugintest.Load(t, builtin(p), nil, - plugintest.Configure(` - experimental = { - sigstore = { - rekor_url = "https://rekor.org" - } - } - `), - plugintest.CaptureConfigureError(&err)) - spiretest.RequireGRPCStatus(t, err, codes.InvalidArgument, "sigstore configuration is not supported on windows environment") -} - func TestContainerHelper(t *testing.T) { fakeHelper := &fakeProcessHelper{} cHelper := &containerHelper{ diff --git a/pkg/agent/plugin/workloadattestor/k8s/sigstore/sigstore.go b/pkg/agent/plugin/workloadattestor/k8s/sigstore/sigstore.go deleted file mode 100644 index 07f4c4c5883..00000000000 --- a/pkg/agent/plugin/workloadattestor/k8s/sigstore/sigstore.go +++ /dev/null @@ -1,519 +0,0 @@ -//go:build !windows - -package sigstore - -import ( - "bytes" - "context" - "crypto/x509" - "encoding/asn1" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "net/url" - "strconv" - - "github.com/google/go-containerregistry/pkg/name" - v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/remote" - "github.com/hashicorp/go-hclog" - "github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio" - "github.com/sigstore/cosign/v2/pkg/cosign" - "github.com/sigstore/cosign/v2/pkg/cosign/bundle" - 
"github.com/sigstore/cosign/v2/pkg/oci" - rekor "github.com/sigstore/rekor/pkg/generated/client" - "github.com/sigstore/sigstore/pkg/cryptoutils" - "github.com/sigstore/sigstore/pkg/signature/payload" - "github.com/spiffe/spire/pkg/common/telemetry" - corev1 "k8s.io/api/core/v1" -) - -const ( - // Signature Verification Selector - signatureVerifiedSelector = "sigstore-validation:passed" -) - -var ( - // OIDC token issuer Object Identifier - oidcIssuerOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 1} -) - -type Sigstore interface { - AttestContainerSignatures(ctx context.Context, status *corev1.ContainerStatus) ([]string, error) - FetchImageSignatures(ctx context.Context, imageName string) ([]oci.Signature, error) - SelectorValuesFromSignature(oci.Signature) (*SelectorsFromSignatures, error) - ExtractSelectorsFromSignatures(signatures []oci.Signature, containerID string) []SelectorsFromSignatures - ShouldSkipImage(imageID string) (bool, error) - AddSkippedImages(imageID []string) - ClearSkipList() - AddAllowedSubject(issuer string, subject string) - ClearAllowedSubjects() - SetRekorURL(rekorURL string) error - SetLogger(logger hclog.Logger) - SetEnforceSCT(enforceSCT bool) -} - -// The following structs are used to go through the payload json objects -type BundleSignature struct { - Content string `json:"content"` - Format string `json:"format"` - PublicKey map[string]string `json:"publicKey"` -} - -type BundleSpec struct { - Data map[string]map[string]string `json:"data"` - Signature BundleSignature `json:"signature"` -} - -type BundleBody struct { - APIVersion string `json:"apiVersion"` - Kind string `json:"kind"` - Spec BundleSpec `json:"spec"` -} - -// Data extracted from signature -type SelectorsFromSignatures struct { - Subject string - Content string - LogID string - IntegratedTime string -} - -func New(cache Cache, logger hclog.Logger) Sigstore { - return &sigstoreImpl{ - functionHooks: sigstoreFunctionHooks{ - // verifyFunction does all the images 
signatures checks, returning the verified signatures. If there were no valid signatures, it returns an error. - verifyFunction: cosign.VerifyImageSignatures, - fetchImageManifestFunction: remote.Get, - checkOptsFunction: defaultCheckOptsFunction, - }, - - logger: logger, - sigstorecache: cache, - } -} - -type sigstoreImpl struct { - functionHooks sigstoreFunctionHooks - skippedImages map[string]struct{} - subjectAllowList map[string]map[string]struct{} - rekorURL url.URL - logger hclog.Logger - sigstorecache Cache - enforceSCT bool -} - -type sigstoreFunctionHooks struct { - verifyFunction verifyFunctionType - fetchImageManifestFunction fetchImageManifestFunctionType - checkOptsFunction checkOptsFunctionType -} - -func (s *sigstoreImpl) SetEnforceSCT(enforceSCT bool) { - s.enforceSCT = enforceSCT -} - -func (s *sigstoreImpl) SetLogger(logger hclog.Logger) { - s.logger = logger -} - -// FetchImageSignatures retrieves signatures for specified image via cosign, using the specified rekor server. -// Returns a list of verified signatures, and an error if any. 
-func (s *sigstoreImpl) FetchImageSignatures(ctx context.Context, imageName string) ([]oci.Signature, error) { - ref, err := name.ParseReference(imageName) - if err != nil { - return nil, fmt.Errorf("error parsing image reference: %w", err) - } - - if err := s.ValidateImage(ref); err != nil { - return nil, fmt.Errorf("could not validate image reference digest: %w", err) - } - - co, err := s.functionHooks.checkOptsFunction(ctx, s.rekorURL, s.enforceSCT, s.subjectAllowList) - if err != nil { - return nil, fmt.Errorf("could not create cosign check options: %w", err) - } - sigs, ok, err := s.functionHooks.verifyFunction(ctx, ref, co) - switch { - case err != nil: - return nil, fmt.Errorf("error verifying signature: %w", err) - case !ok: - return nil, fmt.Errorf("bundle not verified for %q", imageName) - default: - return sigs, nil - } -} - -// ExtractSelectorsFromSignatures extracts selectors from a list of image signatures. -// returns a list of selector strings. -func (s *sigstoreImpl) ExtractSelectorsFromSignatures(signatures []oci.Signature, containerID string) []SelectorsFromSignatures { - if len(signatures) == 0 { - s.logger.Error("no signatures found for container", telemetry.ContainerID, containerID) - return nil - } - var selectors []SelectorsFromSignatures - for _, sig := range signatures { - // verify which subject - sigSelectors, err := s.SelectorValuesFromSignature(sig) - if err != nil { - s.logger.Error("error extracting selectors from signature", "error", err, telemetry.ContainerID, containerID) - - continue - } - selectors = append(selectors, *sigSelectors) - } - return selectors -} - -// SelectorValuesFromSignature extracts selectors from a signature. -// returns a list of selectors. 
-func (s *sigstoreImpl) SelectorValuesFromSignature(signature oci.Signature) (*SelectorsFromSignatures, error) { - subject, err := getSignatureSubject(signature) - if err != nil { - return nil, fmt.Errorf("error getting signature subject: %w", err) - } - if subject == "" { - return nil, errors.New("error getting signature subject: empty subject") - } - - issuer, err := getSignatureProvider(signature) - if err != nil { - return nil, fmt.Errorf("error getting signature provider: %w", err) - } - if issuer == "" { - return nil, fmt.Errorf("error getting signature provider: %w", errors.New("empty issuer")) - } - - // This filter is already done when getting the signature (since cosign 2.0), - // so its response must not contain signatures from disallowed subjects. - if issuerSubjects, ok := s.subjectAllowList[issuer]; !ok { - return nil, fmt.Errorf("signature issuer %q not in allow-list", issuer) - } else if _, ok := issuerSubjects[subject]; !ok { - return nil, fmt.Errorf("subject %q not allowed for issuer %q", subject, issuer) - } - - bundle, err := signature.Bundle() - switch { - case err != nil: - return nil, fmt.Errorf("error getting signature bundle: %w", err) - case bundle.Payload.LogID == "": - return nil, errors.New("error getting signature log ID: empty log ID") - case bundle.Payload.IntegratedTime == 0: - return nil, errors.New("error getting signature integrated time: integrated time is 0") - } - sigContent, err := getBundleSignatureContent(bundle) - if err != nil { - return nil, fmt.Errorf("error getting signature content: %w", err) - } - - return &SelectorsFromSignatures{ - Subject: subject, - Content: sigContent, - LogID: bundle.Payload.LogID, - IntegratedTime: strconv.FormatInt(bundle.Payload.IntegratedTime, 10), - }, nil -} - -// ShouldSkipImage checks the skip list for the image ID in the container status. -// If the image ID is found in the skip list, it returns true. -// If the image ID is not found in the skip list, it returns false. 
-func (s *sigstoreImpl) ShouldSkipImage(imageID string) (bool, error) { - if imageID == "" { - return false, errors.New("image ID is empty") - } - if len(s.skippedImages) == 0 { - return false, nil - } - _, ok := s.skippedImages[imageID] - return ok, nil -} - -// AddSkippedImage adds the image ID and selectors to the skip list. -func (s *sigstoreImpl) AddSkippedImages(imageIDList []string) { - if s.skippedImages == nil { - s.skippedImages = make(map[string]struct{}) - } - for _, imageID := range imageIDList { - s.skippedImages[imageID] = struct{}{} - } -} - -// ClearSkipList clears the skip list. -func (s *sigstoreImpl) ClearSkipList() { - s.skippedImages = nil -} - -// ValidateImage validates if the image manifest hash matches the digest in the image reference -func (s *sigstoreImpl) ValidateImage(ref name.Reference) error { - dgst, ok := ref.(name.Digest) - if !ok { - return fmt.Errorf("reference %T is not a digest", ref) - } - desc, err := s.functionHooks.fetchImageManifestFunction(dgst) - if err != nil { - return err - } - if len(desc.Manifest) == 0 { - return errors.New("manifest is empty") - } - hash, _, err := v1.SHA256(bytes.NewReader(desc.Manifest)) - if err != nil { - return err - } - - return validateRefDigest(dgst, hash.String()) -} - -func (s *sigstoreImpl) AddAllowedSubject(issuer string, subject string) { - if s.subjectAllowList == nil { - s.subjectAllowList = make(map[string]map[string]struct{}) - } - if _, ok := s.subjectAllowList[issuer]; !ok { - s.subjectAllowList[issuer] = make(map[string]struct{}) - } - s.subjectAllowList[issuer][subject] = struct{}{} -} - -func (s *sigstoreImpl) ClearAllowedSubjects() { - s.subjectAllowList = nil -} - -func (s *sigstoreImpl) AttestContainerSignatures(ctx context.Context, status *corev1.ContainerStatus) ([]string, error) { - skip, err := s.ShouldSkipImage(status.ImageID) - if err != nil { - return nil, fmt.Errorf("failed attesting container signature: %w", err) - } - if skip { - return 
[]string{signatureVerifiedSelector}, nil - } - - imageID := status.ImageID - - cachedSignature := s.sigstorecache.GetSignature(imageID) - if cachedSignature != nil { - s.logger.Debug("Found cached signature", "image_id", imageID) - } else { - signatures, err := s.FetchImageSignatures(ctx, imageID) - if err != nil { - return nil, err - } - - selectors := s.ExtractSelectorsFromSignatures(signatures, status.ContainerID) - - cachedSignature = &Item{ - Key: imageID, - Value: selectors, - } - - s.logger.Debug("Caching signature", "image_id", imageID) - s.sigstorecache.PutSignature(*cachedSignature) - } - - var selectorsString []string - if len(cachedSignature.Value) > 0 { - for _, selector := range cachedSignature.Value { - toString := selectorsToString(selector, status.ContainerID) - selectorsString = append(selectorsString, toString...) - } - selectorsString = append(selectorsString, signatureVerifiedSelector) - } - - return selectorsString, nil -} - -func (s *sigstoreImpl) SetRekorURL(rekorURL string) error { - if rekorURL == "" { - return errors.New("rekor URL is empty") - } - rekorURI, err := url.Parse(rekorURL) - if err != nil { - return fmt.Errorf("failed parsing rekor URI: %w", err) - } - if rekorURI.Host == "" { - return fmt.Errorf("host is required on rekor URL") - } - if rekorURI.Scheme != "https" { - return fmt.Errorf("invalid rekor URL Scheme %q", rekorURI.Scheme) - } - s.rekorURL = *rekorURI - return nil -} - -func defaultCheckOptsFunction(ctx context.Context, rekorURL url.URL, enforceSCT bool, allowedSubjects map[string]map[string]struct{}) (*cosign.CheckOpts, error) { - switch { - case rekorURL.Host == "": - return nil, errors.New("rekor URL host is empty") - case rekorURL.Scheme == "": - return nil, errors.New("rekor URL scheme is empty") - case rekorURL.Path == "": - return nil, errors.New("rekor URL path is empty") - } - - rootCerts, err := fulcio.GetRoots() - if err != nil { - return nil, fmt.Errorf("failed to get fulcio root certificates: %w", err) - 
} - - rekorPubKeys, err := cosign.GetRekorPubs(ctx) - if err != nil { - return nil, fmt.Errorf("failed to get Rekor public keys: %w", err) - } - - ctLogPubKeys, err := cosign.GetCTLogPubs(ctx) - if err != nil { - return nil, fmt.Errorf("failed to get CTLog public keys: %w", err) - } - - intermediateCerts, err := fulcio.GetIntermediates() - if err != nil { - return nil, fmt.Errorf("failed to get Fulcio intermediate certificates: %w", err) - } - - var allowedIdentities []cosign.Identity - for issuer, subjects := range allowedSubjects { - for subject := range subjects { - allowedIdentities = append(allowedIdentities, cosign.Identity{ - Issuer: issuer, - Subject: subject, - }) - } - } - - cfg := rekor.DefaultTransportConfig().WithBasePath(rekorURL.Path).WithHost(rekorURL.Host) - co := &cosign.CheckOpts{ - // Set the rekor client - RekorClient: rekor.NewHTTPClientWithConfig(nil, cfg), - RootCerts: rootCerts, - RekorPubKeys: rekorPubKeys, - CTLogPubKeys: ctLogPubKeys, - Identities: allowedIdentities, - IntermediateCerts: intermediateCerts, - IgnoreSCT: !enforceSCT, - } - - return co, nil -} - -func getSignatureSubject(signature oci.Signature) (string, error) { - if signature == nil { - return "", errors.New("signature is nil") - } - ss := payload.SimpleContainerImage{} - pl, err := signature.Payload() - if err != nil { - return "", err - } - if pl == nil { - return "", errors.New("signature payload is nil") - } - if err := json.Unmarshal(pl, &ss); err != nil { - return "", err - } - cert, err := signature.Cert() - if err != nil { - return "", fmt.Errorf("failed to access signature certificate: %w", err) - } - if cert != nil { - // The CertSubject method used to be called here. It has since been - // removed in favore of GetSubjectAlternateNames method, which returns - // more SANs. In any case, preserve the previous behavior wherein a - // cert without a subject caused this function to return an empty - // subject and no error. 
- if sans := cryptoutils.GetSubjectAlternateNames(cert); len(sans) > 0 { - return sans[0], nil - } - return "", nil - } - if len(ss.Optional) > 0 { - if subjString, ok := ss.Optional["subject"]; ok { - if subj, ok := subjString.(string); ok { - return subj, nil - } - } - } - - return "", errors.New("no subject found in signature") -} - -func getSignatureProvider(signature oci.Signature) (string, error) { - if signature == nil { - return "", errors.New("signature is nil") - } - cert, err := signature.Cert() - if err != nil { - return "", fmt.Errorf("failed to access signature certificate: %w", err) - } - if cert == nil { - return "", errors.New("no certificate found in signature") - } - return certOIDCProvider(cert) -} - -func certOIDCProvider(cert *x509.Certificate) (string, error) { - if cert == nil { - return "", errors.New("certificate is nil") - } - - for _, ext := range cert.Extensions { - if ext.Id.Equal(oidcIssuerOID) { - return string(ext.Value), nil - } - } - - return "", errors.New("no OIDC issuer found in certificate extensions") -} - -func getBundleSignatureContent(bundle *bundle.RekorBundle) (string, error) { - if bundle == nil { - return "", errors.New("bundle is nil") - } - body64, ok := bundle.Payload.Body.(string) - if !ok { - return "", fmt.Errorf("expected payload body to be a string but got %T instead", bundle.Payload.Body) - } - body, err := base64.StdEncoding.DecodeString(body64) - if err != nil { - return "", err - } - var bundleBody BundleBody - if err := json.Unmarshal(body, &bundleBody); err != nil { - return "", fmt.Errorf("failed to parse bundle body: %w", err) - } - - if bundleBody.Spec.Signature.Content == "" { - return "", errors.New("bundle payload body has no signature content") - } - - return bundleBody.Spec.Signature.Content, nil -} - -func selectorsToString(selectors SelectorsFromSignatures, containerID string) []string { - var selectorsString []string - if selectors.Subject != "" { - selectorsString = append(selectorsString, 
fmt.Sprintf("%s:image-signature-subject:%s", containerID, selectors.Subject)) - } - if selectors.Content != "" { - selectorsString = append(selectorsString, fmt.Sprintf("%s:image-signature-content:%s", containerID, selectors.Content)) - } - if selectors.LogID != "" { - selectorsString = append(selectorsString, fmt.Sprintf("%s:image-signature-logid:%s", containerID, selectors.LogID)) - } - if selectors.IntegratedTime != "" { - selectorsString = append(selectorsString, fmt.Sprintf("%s:image-signature-integrated-time:%s", containerID, selectors.IntegratedTime)) - } - return selectorsString -} - -func validateRefDigest(dgst name.Digest, digest string) error { - if dgst.DigestStr() == digest { - return nil - } - return fmt.Errorf("digest %s does not match %s", digest, dgst.DigestStr()) -} - -type verifyFunctionType func(context.Context, name.Reference, *cosign.CheckOpts) ([]oci.Signature, bool, error) - -type fetchImageManifestFunctionType func(name.Reference, ...remote.Option) (*remote.Descriptor, error) - -type checkOptsFunctionType func(context.Context, url.URL, bool, map[string]map[string]struct{}) (*cosign.CheckOpts, error) diff --git a/pkg/agent/plugin/workloadattestor/k8s/sigstore/sigstore_test.go b/pkg/agent/plugin/workloadattestor/k8s/sigstore/sigstore_test.go deleted file mode 100644 index 03de36e47e6..00000000000 --- a/pkg/agent/plugin/workloadattestor/k8s/sigstore/sigstore_test.go +++ /dev/null @@ -1,2113 +0,0 @@ -//go:build !windows - -package sigstore - -import ( - "bytes" - "context" - "crypto/x509" - "crypto/x509/pkix" - "errors" - "fmt" - "net/url" - "testing" - - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1/remote" - "github.com/hashicorp/go-hclog" - "github.com/sigstore/cosign/v2/cmd/cosign/cli/fulcio" - "github.com/sigstore/cosign/v2/pkg/cosign" - "github.com/sigstore/cosign/v2/pkg/cosign/bundle" - "github.com/sigstore/cosign/v2/pkg/oci" - rekor "github.com/sigstore/rekor/pkg/generated/client" - 
"github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" -) - -const ( - maximumAmountCache = 10 -) - -func TestNew(t *testing.T) { - newcache := NewCache(maximumAmountCache) - want := &sigstoreImpl{ - functionHooks: sigstoreFunctionHooks{ - verifyFunction: cosign.VerifyImageSignatures, - fetchImageManifestFunction: remote.Get, - checkOptsFunction: defaultCheckOptsFunction, - }, - skippedImages: nil, - subjectAllowList: nil, - rekorURL: url.URL{}, - sigstorecache: newcache, - logger: nil, - } - sigstore := New(newcache, nil) - - require.IsType(t, &sigstoreImpl{}, sigstore) - sigImpObj, _ := sigstore.(*sigstoreImpl) - - // test each field manually since require.Equal does not work on function pointers - if &(sigImpObj.functionHooks.verifyFunction) == &(want.functionHooks.verifyFunction) { - t.Errorf("verify functions do not match") - } - if &(sigImpObj.functionHooks.fetchImageManifestFunction) == &(want.functionHooks.fetchImageManifestFunction) { - t.Errorf("fetchImageManifest functions do not match") - } - if &(sigImpObj.functionHooks.checkOptsFunction) == &(want.functionHooks.checkOptsFunction) { - t.Errorf("checkOptsFunction functions do not match") - } - require.Empty(t, sigImpObj.skippedImages, "skippedImages array is not empty") - require.Empty(t, sigImpObj.subjectAllowList, "subjectAllowList array is not empty") - require.Equal(t, want.rekorURL, sigImpObj.rekorURL, "rekorURL is different from rekor default") - require.Equal(t, want.sigstorecache, sigImpObj.sigstorecache, "sigstorecache is different from fresh object") - require.Nil(t, sigImpObj.logger, "new logger is not nil") -} - -func TestSigstoreimpl_TestDefaultCheckOptsFunction(t *testing.T) { - ctx := context.Background() - - for _, tt := range []struct { - name string - rekorURL url.URL - enforceSCT bool - allowedSubjects map[string]map[string]struct{} - - expectErr string - expectIdentities []cosign.Identity - }{ - { - name: "opts with multiple 
identities", - rekorURL: rekorDefaultURL(), - enforceSCT: true, - allowedSubjects: map[string]map[string]struct{}{ - "foo": { - "s1": {}, - "s2": {}, - }, - "bar": { - "x1": {}, - }, - }, - expectIdentities: []cosign.Identity{ - { - Issuer: "bar", - Subject: "x1", - }, - { - Issuer: "foo", - Subject: "s1", - }, - { - Issuer: "foo", - Subject: "s2", - }, - }, - }, - { - name: "ignore SCT", - rekorURL: rekorDefaultURL(), - enforceSCT: false, - allowedSubjects: map[string]map[string]struct{}{ - "foo": { - "s1": {}, - }, - }, - expectIdentities: []cosign.Identity{ - { - Issuer: "foo", - Subject: "s1", - }, - }, - }, - { - name: "empty url", - rekorURL: url.URL{}, - enforceSCT: false, - expectErr: "rekor URL host is empty", - }, - { - name: "empty scheme", - rekorURL: url.URL{ - Scheme: "", - Host: "foo", - Path: "bar", - }, - enforceSCT: false, - expectErr: "rekor URL scheme is empty", - }, - { - name: "empty path", - rekorURL: url.URL{ - Scheme: "http", - Host: "foo", - Path: "", - }, - enforceSCT: false, - expectErr: "rekor URL path is empty", - }, - } { - t.Run(tt.name, func(t *testing.T) { - co, err := defaultCheckOptsFunction(ctx, tt.rekorURL, tt.enforceSCT, tt.allowedSubjects) - if tt.expectErr != "" { - require.EqualError(t, err, tt.expectErr) - require.Nil(t, co) - return - } - - require.NoError(t, err) - require.NotNil(t, co) - - assert.NotNil(t, co.RekorClient) - assert.NotEmpty(t, co.RootCerts) - assert.NotEmpty(t, co.RekorPubKeys) - assert.NotEmpty(t, co.CTLogPubKeys) - assert.NotEmpty(t, co.IntermediateCerts) - - require.ElementsMatch(t, co.Identities, tt.expectIdentities) - require.Equal(t, co.IgnoreSCT, !tt.enforceSCT) - }) - } -} - -func TestSigstoreimpl_FetchImageSignatures(t *testing.T) { - ctx := context.Background() - type fields struct { - functionBindings sigstoreFunctionBindings - rekorURL url.URL - } - enforceSCT := true - - allowedSubjects := make(map[string]map[string]struct{}) - allowedSubjects["issuer"] = map[string]struct{}{ - "subject": 
{}, - } - - defaultCheckOpts, err := defaultCheckOptsFunction(ctx, rekorDefaultURL(), enforceSCT, allowedSubjects) - require.NoError(t, err) - emptyURLCheckOpts, emptyError := defaultCheckOptsFunction(ctx, url.URL{}, enforceSCT, map[string]map[string]struct{}{}) - require.Nil(t, emptyURLCheckOpts) - require.EqualError(t, emptyError, "rekor URL host is empty") - - tests := []struct { - name string - fields fields - imageName string - wantedFetchArguments fetchFunctionArguments - wantedVerifyArguments verifyFunctionArguments - wantedCheckOptsArguments checkOptsFunctionArguments - want []oci.Signature - wantedErr error - }{ - { - name: "fetch image with signature", - fields: fields{ - functionBindings: sigstoreFunctionBindings{ - verifyBinding: createVerifyFunction([]oci.Signature{ - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "02c15a8d1735c65bb8ca86c716615d3c0d8beb87dc68ed88bb49192f90b184e2"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 2","key3": "value 3"}}`), - }, - }, true, nil), - fetchBinding: createFetchFunction(&remote.Descriptor{Manifest: []byte("sometext")}, nil), - checkOptsBinding: createCheckOptsFunction(defaultCheckOpts, nil), - }, - rekorURL: rekorDefaultURL(), - }, - imageName: "docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505", - wantedFetchArguments: fetchFunctionArguments{ - ref: name.MustParseReference("docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: nil, - }, - wantedVerifyArguments: verifyFunctionArguments{ - context: context.Background(), - ref: name.MustParseReference("docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: defaultCheckOpts, - }, - wantedCheckOptsArguments: checkOptsFunctionArguments{ - url: rekorDefaultURL(), 
- }, - want: []oci.Signature{ - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "02c15a8d1735c65bb8ca86c716615d3c0d8beb87dc68ed88bb49192f90b184e2"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 2","key3": "value 3"}}`), - }, - }, - }, - { - name: "fetch image with 2 signatures", - fields: fields{ - functionBindings: sigstoreFunctionBindings{ - verifyBinding: createVerifyFunction([]oci.Signature{ - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 2","key3": "value 3"}}`), - }, - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "some digest"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 4","key3": "value 5"}}`), - }, - }, true, nil), - fetchBinding: createFetchFunction(&remote.Descriptor{Manifest: []byte("sometext")}, nil), - checkOptsBinding: createCheckOptsFunction(defaultCheckOpts, nil), - }, - rekorURL: rekorDefaultURL(), - }, - imageName: "docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505", - wantedFetchArguments: fetchFunctionArguments{ - ref: name.MustParseReference("docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: nil, - }, - wantedVerifyArguments: verifyFunctionArguments{ - context: context.Background(), - ref: name.MustParseReference("docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: defaultCheckOpts, - }, - wantedCheckOptsArguments: checkOptsFunctionArguments{ - url: 
rekorDefaultURL(), - }, - want: []oci.Signature{ - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 2","key3": "value 3"}}`), - }, - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "some digest"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 4","key3": "value 5"}}`), - }, - }, - }, - { - name: "fetch image with no signature", - fields: fields{ - functionBindings: sigstoreFunctionBindings{ - verifyBinding: createVerifyFunction(nil, true, errors.New("no matching signatures 2")), - fetchBinding: createFetchFunction(&remote.Descriptor{Manifest: []byte("sometext")}, nil), - checkOptsBinding: createCheckOptsFunction(defaultCheckOpts, nil), - }, - rekorURL: rekorDefaultURL(), - }, - imageName: "docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505", - wantedFetchArguments: fetchFunctionArguments{ - ref: name.MustParseReference("docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: nil, - }, - wantedVerifyArguments: verifyFunctionArguments{ - context: context.Background(), - ref: name.MustParseReference("docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: defaultCheckOpts, - }, - wantedCheckOptsArguments: checkOptsFunctionArguments{ - url: rekorDefaultURL(), - }, - want: nil, - wantedErr: fmt.Errorf("error verifying signature: %w", errors.New("no matching signatures 2")), - }, - { // TODO: check again, same as above test. 
should never happen, since the verify function returns an error on empty verified signature list - name: "fetch image with no signature and no error", - fields: fields{ - functionBindings: sigstoreFunctionBindings{ - verifyBinding: createVerifyFunction(nil, true, nil), - fetchBinding: createFetchFunction(&remote.Descriptor{Manifest: []byte("sometext")}, nil), - checkOptsBinding: createCheckOptsFunction(defaultCheckOpts, nil), - }, - rekorURL: rekorDefaultURL(), - }, - imageName: "docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505", - wantedFetchArguments: fetchFunctionArguments{ - ref: name.MustParseReference("docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: nil, - }, - wantedVerifyArguments: verifyFunctionArguments{ - context: context.Background(), - ref: name.MustParseReference("docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: defaultCheckOpts, - }, - wantedCheckOptsArguments: checkOptsFunctionArguments{ - url: rekorDefaultURL(), - }, - want: nil, - wantedErr: nil, - }, - { - name: "fetch image with signature and error", - fields: fields{ - functionBindings: sigstoreFunctionBindings{ - verifyBinding: createVerifyFunction([]oci.Signature{ - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "02c15a8d1735c65bb8ca86c716615d3c0d8beb87dc68ed88bb49192f90b184e2"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 2","key3": "value 3"}}`), - }, - }, true, errors.New("unexpected error")), - fetchBinding: createFetchFunction(&remote.Descriptor{Manifest: []byte("sometext")}, nil), - checkOptsBinding: createCheckOptsFunction(defaultCheckOpts, nil), - }, - rekorURL: rekorDefaultURL(), - }, - imageName: 
"docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505", - wantedFetchArguments: fetchFunctionArguments{ - ref: name.MustParseReference("docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: nil, - }, - wantedVerifyArguments: verifyFunctionArguments{ - context: context.Background(), - ref: name.MustParseReference("docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: defaultCheckOpts, - }, - wantedCheckOptsArguments: checkOptsFunctionArguments{ - url: rekorDefaultURL(), - }, - want: nil, - wantedErr: fmt.Errorf("error verifying signature: %w", errors.New("unexpected error")), - }, - { - name: "fetch image with signature no error, bundle not verified", - fields: fields{ - functionBindings: sigstoreFunctionBindings{ - verifyBinding: createVerifyFunction([]oci.Signature{ - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "02c15a8d1735c65bb8ca86c716615d3c0d8beb87dc68ed88bb49192f90b184e2"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 2","key3": "value 3"}}`), - }, - }, false, nil), - fetchBinding: createFetchFunction(&remote.Descriptor{Manifest: []byte("sometext")}, nil), - checkOptsBinding: createCheckOptsFunction(defaultCheckOpts, nil), - }, - rekorURL: rekorDefaultURL(), - }, - imageName: "docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505", - wantedFetchArguments: fetchFunctionArguments{ - ref: name.MustParseReference("docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: nil, - }, - wantedVerifyArguments: verifyFunctionArguments{ - context: context.Background(), - ref: 
name.MustParseReference("docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: defaultCheckOpts, - }, - wantedCheckOptsArguments: checkOptsFunctionArguments{ - url: rekorDefaultURL(), - }, - want: nil, - wantedErr: fmt.Errorf("bundle not verified for %q", "docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - }, - { - name: "fetch image with invalid image reference", - fields: fields{ - functionBindings: sigstoreFunctionBindings{ - verifyBinding: createNilVerifyFunction(), - fetchBinding: createNilFetchFunction(), - checkOptsBinding: createNilCheckOptsFunction(), - }, - rekorURL: rekorDefaultURL(), - }, - imageName: "invali|].url.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505", - want: nil, - wantedErr: fmt.Errorf("error parsing image reference: %w", errors.New("could not parse reference: invali|].url.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505")), - }, - { - name: "fetch image with signature, empty rekor url", - fields: fields{ - functionBindings: sigstoreFunctionBindings{ - verifyBinding: createNilVerifyFunction(), - fetchBinding: createFetchFunction(&remote.Descriptor{Manifest: []byte("sometext")}, nil), - checkOptsBinding: createCheckOptsFunction(emptyURLCheckOpts, emptyError), - }, - rekorURL: url.URL{}, - }, - imageName: "docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505", - wantedFetchArguments: fetchFunctionArguments{ - ref: name.MustParseReference("docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: nil, - }, - wantedVerifyArguments: verifyFunctionArguments{}, - wantedCheckOptsArguments: checkOptsFunctionArguments{ - url: url.URL{}, - }, - want: nil, - wantedErr: fmt.Errorf("could not create cosign check options: %w", emptyError), - }, - { - 
name: "fetch image with wrong image hash", - fields: fields{ - functionBindings: sigstoreFunctionBindings{ - verifyBinding: createNilVerifyFunction(), - fetchBinding: createFetchFunction(&remote.Descriptor{Manifest: []byte("sometext")}, nil), - checkOptsBinding: createNilCheckOptsFunction(), - }, - rekorURL: rekorDefaultURL(), - }, - imageName: "docker-registry.com/some/image@sha256:4fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505", - wantedFetchArguments: fetchFunctionArguments{ - ref: name.MustParseReference("docker-registry.com/some/image@sha256:4fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: nil, - }, - wantedVerifyArguments: verifyFunctionArguments{}, - wantedCheckOptsArguments: checkOptsFunctionArguments{}, - want: nil, - wantedErr: fmt.Errorf("could not validate image reference digest: %w", errors.New("digest sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505 does not match sha256:4fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505")), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fetchArguments := &fetchFunctionArguments{} - verifyArguments := &verifyFunctionArguments{} - checkOptsArguments := &checkOptsFunctionArguments{} - sigstore := sigstoreImpl{ - functionHooks: sigstoreFunctionHooks{ - verifyFunction: tt.fields.functionBindings.verifyBinding(t, verifyArguments), - fetchImageManifestFunction: tt.fields.functionBindings.fetchBinding(t, fetchArguments), - checkOptsFunction: tt.fields.functionBindings.checkOptsBinding(t, checkOptsArguments), - }, - sigstorecache: NewCache(maximumAmountCache), - rekorURL: tt.fields.rekorURL, - } - got, err := sigstore.FetchImageSignatures(context.Background(), tt.imageName) - if tt.wantedErr != nil { - require.EqualError(t, err, tt.wantedErr.Error()) - } else { - require.NoError(t, err) - } - - require.Equal(t, tt.want, got) - - require.Equal(t, tt.wantedFetchArguments, *fetchArguments) - - require.Equal(t, 
tt.wantedCheckOptsArguments, *checkOptsArguments) - - require.Equal(t, tt.wantedVerifyArguments, *verifyArguments) - }) - } -} - -func TestSigstoreimpl_ExtractSelectorsFromSignatures(t *testing.T) { - tests := []struct { - name string - signatures []oci.Signature - containerID string - subjectAllowList map[string]map[string]struct{} - want []SelectorsFromSignatures - wantLog string - }{ - { - name: "extract selector from single image signature array", - signatures: []oci.Signature{ - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "some digest"},"type": "some type"},"optional": {"subject": "spirex@example.com"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "ewogICJzcGVjIjogewogICAgInNpZ25hdHVyZSI6IHsKICAgICAgImNvbnRlbnQiOiAiTUVVQ0lRQ3llbThHY3Iwc1BGTVA3ZlRYYXpDTjU3TmNONStNanhKdzlPbzB4MmVNK0FJZ2RnQlA5NkJPMVRlL05kYmpIYlVlYjBCVXllNmRlUmdWdFFFdjVObzVzbUE9IgogICAgfQogIH0KfQ==", - LogID: "samplelogID", - IntegratedTime: 12345, - }, - }, - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex@example.com"}, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, - }, - }, - containerID: "000000", - subjectAllowList: map[string]map[string]struct{}{ - "issuer1": {"spirex@example.com": struct{}{}}, - }, - want: []SelectorsFromSignatures{ - { - Subject: "spirex@example.com", - Content: "MEUCIQCyem8Gcr0sPFMP7fTXazCN57NcN5+MjxJw9Oo0x2eM+AIgdgBP96BO1Te/NdbjHbUeb0BUye6deRgVtQEv5No5smA=", - LogID: "samplelogID", - IntegratedTime: "12345", - }, - }, - }, - { - name: "extract selector from image signature array with multiple entries", - signatures: []oci.Signature{ - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "some digest"},"type": "some type"},"optional": {"subject": "spirex1@example.com","key2": "value 
2","key3": "value 3"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "ewogICJzcGVjIjogewogICAgInNpZ25hdHVyZSI6IHsKICAgICAgImNvbnRlbnQiOiAiTUVVQ0lRQ3llbThHY3Iwc1BGTVA3ZlRYYXpDTjU3TmNONStNanhKdzlPbzB4MmVNK0FJZ2RnQlA5NkJPMVRlL05kYmpIYlVlYjBCVXllNmRlUmdWdFFFdjVObzVzbUE9IgogICAgfQogIH0KfQ==", - LogID: "samplelogID1", - IntegratedTime: 12345, - }, - }, - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex1@example.com"}, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, - }, - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "some digest"},"type": "some type"},"optional": {"subject": "spirex2@example.com","key2": "value 2","key3": "value 3"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "ewogICJzcGVjIjogewogICAgInNpZ25hdHVyZSI6IHsKICAgICAgImNvbnRlbnQiOiAiTUVVQ0lRQ3llbThHY3Iwc1BGTVA3ZlRYYXpDTjU3TmNONStNanhKdzlPbzB4MmVNK0FJZ2RnQlA5NkJPMVRlL05kYmpIYlVlYjBCVXllNmRlUmdWdFFFdjVObzVzbUI9IgogICAgfQogIH0KfQo=", - LogID: "samplelogID2", - IntegratedTime: 12346, - }, - }, - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex2@example.com"}, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, - }, - }, - containerID: "111111", - subjectAllowList: map[string]map[string]struct{}{ - "issuer1": {"spirex1@example.com": struct{}{}, "spirex2@example.com": struct{}{}}, - }, - want: []SelectorsFromSignatures{ - { - Subject: "spirex1@example.com", - Content: "MEUCIQCyem8Gcr0sPFMP7fTXazCN57NcN5+MjxJw9Oo0x2eM+AIgdgBP96BO1Te/NdbjHbUeb0BUye6deRgVtQEv5No5smA=", - LogID: "samplelogID1", - IntegratedTime: "12345", - }, - { - Subject: "spirex2@example.com", - Content: "MEUCIQCyem8Gcr0sPFMP7fTXazCN57NcN5+MjxJw9Oo0x2eM+AIgdgBP96BO1Te/NdbjHbUeb0BUye6deRgVtQEv5No5smB=", - LogID: "samplelogID2", - IntegratedTime: "12346", - }, - }, - }, - { - name: "with 
nil payload", - signatures: []oci.Signature{ - signature{ - payload: nil, - }, - }, - containerID: "222222", - want: nil, - wantLog: "signature payload is nil", - }, - { - name: "extract selector from image signature with subject certificate", - signatures: []oci.Signature{ - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "some reference"},"image": {"docker-manifest-digest": "some digest"},"type": "some type"}}`), - cert: &x509.Certificate{ - EmailAddresses: []string{ - "spirex@example.com", - "spirex2@example.com", - }, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "ewogICJzcGVjIjogewogICAgInNpZ25hdHVyZSI6IHsKICAgICAgImNvbnRlbnQiOiAiTUVVQ0lRQ3llbThHY3Iwc1BGTVA3ZlRYYXpDTjU3TmNONStNanhKdzlPbzB4MmVNK0FJZ2RnQlA5NkJPMVRlL05kYmpIYlVlYjBCVXllNmRlUmdWdFFFdjVObzVzbUE9IgogICAgfQogIH0KfQ==", - LogID: "samplelogID", - IntegratedTime: 12345, - }, - }, - }, - }, - containerID: "333333", - subjectAllowList: map[string]map[string]struct{}{ - "issuer1": {"spirex@example.com": struct{}{}, "spirex2@example.com": struct{}{}}, - }, - want: []SelectorsFromSignatures{ - { - Subject: "spirex@example.com", - Content: "MEUCIQCyem8Gcr0sPFMP7fTXazCN57NcN5+MjxJw9Oo0x2eM+AIgdgBP96BO1Te/NdbjHbUeb0BUye6deRgVtQEv5No5smA=", - LogID: "samplelogID", - IntegratedTime: "12345", - }, - }, - }, - { - name: "extract selector from image signature with URI certificate", - signatures: []oci.Signature{ - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "some reference"},"image": {"docker-manifest-digest": "some digest"},"type": "some type"}}`), - cert: &x509.Certificate{ - URIs: []*url.URL{ - { - Scheme: "https", - Host: "www.example.com", - Path: "somepath1", - }, - { - Scheme: "https", - Host: "www.spirex.com", - Path: "somepath2", - }, - }, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, 
- bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "ewogICJzcGVjIjogewogICAgInNpZ25hdHVyZSI6IHsKICAgICAgImNvbnRlbnQiOiAiTUVVQ0lRQ3llbThHY3Iwc1BGTVA3ZlRYYXpDTjU3TmNONStNanhKdzlPbzB4MmVNK0FJZ2RnQlA5NkJPMVRlL05kYmpIYlVlYjBCVXllNmRlUmdWdFFFdjVObzVzbUE9IgogICAgfQogIH0KfQ==", - LogID: "samplelogID", - IntegratedTime: 12345, - }, - }, - }, - }, - containerID: "444444", - subjectAllowList: map[string]map[string]struct{}{ - "issuer1": {"https://www.example.com/somepath1": struct{}{}}, - }, - want: []SelectorsFromSignatures{ - { - Subject: "https://www.example.com/somepath1", - Content: "MEUCIQCyem8Gcr0sPFMP7fTXazCN57NcN5+MjxJw9Oo0x2eM+AIgdgBP96BO1Te/NdbjHbUeb0BUye6deRgVtQEv5No5smA=", - LogID: "samplelogID", - IntegratedTime: "12345", - }, - }, - }, - { - name: "extract selector from empty array", - signatures: []oci.Signature{}, - containerID: "555555", - want: nil, - wantLog: "no signatures found for container: container_id=555555", - }, - { - name: "extract selector from nil array", - signatures: nil, - containerID: "666666", - want: nil, - wantLog: "no signatures found for container: container_id=666666", - }, - { - name: "invalid payload", - signatures: []oci.Signature{ - signature{ - payload: []byte(`{a"critical": {}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "ewogICJzcGVjIjogewogICAgInNpZ25hdHVyZSI6IHsKICAgICAgImNvbnRlbnQiOiAiTUVVQ0lRQ3llbThHY3Iwc1BGTVA3ZlRYYXpDTjU3TmNONStNanhKdzlPbzB4MmVNK0FJZ2RnQlA5NkJPMVRlL05kYmpIYlVlYjBCVXllNmRlUmdWdFFFdjVObzVzbUE9IgogICAgfQogIH0KfQ==", - LogID: "samplelogID", - IntegratedTime: 12345, - }, - }, - }, - }, - containerID: "777777", - want: nil, - wantLog: "error getting signature subject: invalid character 'a' looking for beginning of object key string", - }, - { - name: "extract selector from single image signature array with error getting provider", - signatures: []oci.Signature{ - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": 
"docker-registry.com/some/image"},"image": {"docker-manifest-digest": "some digest"},"type": "some type"},"optional": {"subject": "spirex@example.com"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "ewogICJzcGVjIjogewogICAgInNpZ25hdHVyZSI6IHsKICAgICAgImNvbnRlbnQiOiAiTUVVQ0lRQ3llbThHY3Iwc1BGTVA3ZlRYYXpDTjU3TmNONStNanhKdzlPbzB4MmVNK0FJZ2RnQlA5NkJPMVRlL05kYmpIYlVlYjBCVXllNmRlUmdWdFFFdjVObzVzbUE9IgogICAgfQogIH0KfQ==", - LogID: "samplelogID", - IntegratedTime: 12345, - }, - }, - cert: nil, - }, - }, - containerID: "888888", - subjectAllowList: map[string]map[string]struct{}{ - "issuer1": {"spirex@example.com": struct{}{}}, - }, - want: nil, - wantLog: "error extracting selectors from signature: error=\"error getting signature provider: no certificate found in signature\" container_id=888888", - }, - { - name: "extract selector from single image signature array with empty provider", - signatures: []oci.Signature{ - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "some digest"},"type": "some type"},"optional": {"subject": "spirex@example.com"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "ewogICJzcGVjIjogewogICAgInNpZ25hdHVyZSI6IHsKICAgICAgImNvbnRlbnQiOiAiTUVVQ0lRQ3llbThHY3Iwc1BGTVA3ZlRYYXpDTjU3TmNONStNanhKdzlPbzB4MmVNK0FJZ2RnQlA5NkJPMVRlL05kYmpIYlVlYjBCVXllNmRlUmdWdFFFdjVObzVzbUE9IgogICAgfQogIH0KfQ==", - LogID: "samplelogID", - IntegratedTime: 12345, - }, - }, - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex@example.com"}, Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(``), - }}, - }, - }, - }, - containerID: "999999", - subjectAllowList: map[string]map[string]struct{}{ - "issuer1": {"spirex@example.com": struct{}{}}, - }, - want: nil, - wantLog: "error extracting selectors from signature: error=\"error getting signature provider: empty issuer\" container_id=999999", - }, - { - 
name: "extract selector from single image signature array with no provider extension", - signatures: []oci.Signature{ - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "some digest"},"type": "some type"},"optional": {"subject": "spirex@example.com"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "ewogICJzcGVjIjogewogICAgInNpZ25hdHVyZSI6IHsKICAgICAgImNvbnRlbnQiOiAiTUVVQ0lRQ3llbThHY3Iwc1BGTVA3ZlRYYXpDTjU3TmNONStNanhKdzlPbzB4MmVNK0FJZ2RnQlA5NkJPMVRlL05kYmpIYlVlYjBCVXllNmRlUmdWdFFFdjVObzVzbUE9IgogICAgfQogIH0KfQ==", - LogID: "samplelogID", - IntegratedTime: 12345, - }, - }, - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex@example.com"}, Extensions: []pkix.Extension{}, - }, - }, - }, - containerID: "101010", - subjectAllowList: map[string]map[string]struct{}{ - "issuer1": {"spirex@example.com": struct{}{}}, - }, - want: nil, - wantLog: "error extracting selectors from signature: error=\"error getting signature provider: no OIDC issuer found in certificate extensions\" container_id=101010", - }, - { - name: "extract selector from single image signature array, error no log id", - signatures: []oci.Signature{ - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "some digest"},"type": "some type"},"optional": {"subject": "spirex@example.com"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "ewogICJzcGVjIjogewogICAgInNpZ25hdHVyZSI6IHsKICAgICAgImNvbnRlbnQiOiAiTUVVQ0lRQ3llbThHY3Iwc1BGTVA3ZlRYYXpDTjU3TmNONStNanhKdzlPbzB4MmVNK0FJZ2RnQlA5NkJPMVRlL05kYmpIYlVlYjBCVXllNmRlUmdWdFFFdjVObzVzbUE9IgogICAgfQogIH0KfQ==", - LogID: "", - IntegratedTime: 12345, - }, - }, - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex@example.com"}, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - 
}, - }, - }, - containerID: "101101", - subjectAllowList: map[string]map[string]struct{}{ - "issuer1": {"spirex@example.com": struct{}{}}, - }, - want: nil, - wantLog: "error extracting selectors from signature: error=\"error getting signature log ID: empty log ID\" container_id=101101", - }, - { - name: "extract selector from single image signature array, error no integrated time", - signatures: []oci.Signature{ - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "some digest"},"type": "some type"},"optional": {"subject": "spirex@example.com"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "ewogICJzcGVjIjogewogICAgInNpZ25hdHVyZSI6IHsKICAgICAgImNvbnRlbnQiOiAiTUVVQ0lRQ3llbThHY3Iwc1BGTVA3ZlRYYXpDTjU3TmNONStNanhKdzlPbzB4MmVNK0FJZ2RnQlA5NkJPMVRlL05kYmpIYlVlYjBCVXllNmRlUmdWdFFFdjVObzVzbUE9IgogICAgfQogIH0KfQ==", - LogID: "samplelogID", - IntegratedTime: 0, - }, - }, - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex@example.com"}, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, - }, - }, - containerID: "121212", - subjectAllowList: map[string]map[string]struct{}{ - "issuer1": {"spirex@example.com": struct{}{}}, - }, - want: nil, - wantLog: "error extracting selectors from signature: error=\"error getting signature integrated time: integrated time is 0\" container_id=121212", - }, - { - name: "extract selector from single image signature array, issuer not in allowlist", - signatures: []oci.Signature{ - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "some digest"},"type": "some type"},"optional": {"subject": "spirex@example.com"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: 
"ewogICJzcGVjIjogewogICAgInNpZ25hdHVyZSI6IHsKICAgICAgImNvbnRlbnQiOiAiTUVVQ0lRQ3llbThHY3Iwc1BGTVA3ZlRYYXpDTjU3TmNONStNanhKdzlPbzB4MmVNK0FJZ2RnQlA5NkJPMVRlL05kYmpIYlVlYjBCVXllNmRlUmdWdFFFdjVObzVzbUE9IgogICAgfQogIH0KfQ==", - LogID: "samplelogID", - IntegratedTime: 12345, - }, - }, - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex@example.com"}, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer2`), - }}, - }, - }, - }, - containerID: "131313", - subjectAllowList: map[string]map[string]struct{}{ - "issuer1": {"spirex@example.com": struct{}{}}, - }, - want: nil, - wantLog: "error extracting selectors from signature: error=\"signature issuer \\\"issuer2\\\" not in allow-list\" container_id=131313", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - buf := bytes.Buffer{} - newLog := hclog.New(&hclog.LoggerOptions{ - Output: &buf, - }) - s := sigstoreImpl{ - logger: newLog, - subjectAllowList: tt.subjectAllowList, - } - got := s.ExtractSelectorsFromSignatures(tt.signatures, tt.containerID) - require.Equal(t, tt.want, got) - if len(tt.wantLog) > 0 { - require.Contains(t, buf.String(), tt.wantLog) - } - }) - } -} - -func TestSigstoreimpl_ShouldSkipImage(t *testing.T) { - tests := []struct { - name string - skippedImages map[string]struct{} - imageID string - want bool - wantedErr error - }{ - { - name: "skipping only image in list", - skippedImages: map[string]struct{}{ - "sha256:sampleimagehash": {}, - }, - imageID: "sha256:sampleimagehash", - want: true, - }, - { - name: "skipping image in list", - skippedImages: map[string]struct{}{ - "sha256:sampleimagehash": {}, - "sha256:sampleimagehash2": {}, - "sha256:sampleimagehash3": {}, - }, - imageID: "sha256:sampleimagehash2", - want: true, - }, - { - name: "image not in list", - skippedImages: map[string]struct{}{ - "sha256:sampleimagehash": {}, - "sha256:sampleimagehash3": {}, - }, - imageID: "sha256:sampleimagehash2", - want: false, - }, - { - name: "empty 
skip list", - skippedImages: nil, - imageID: "sha256:sampleimagehash", - want: false, - }, - { - name: "empty imageID", - skippedImages: map[string]struct{}{ - "sha256:sampleimagehash": {}, - "sha256:sampleimagehash2": {}, - "sha256:sampleimagehash3": {}, - }, - imageID: "", - want: false, - wantedErr: errors.New("image ID is empty"), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - sigstore := sigstoreImpl{ - skippedImages: tt.skippedImages, - } - got, err := sigstore.ShouldSkipImage(tt.imageID) - if tt.wantedErr != nil { - require.EqualError(t, err, tt.wantedErr.Error()) - } else { - require.NoError(t, err) - } - require.Equal(t, tt.want, got) - }) - } -} - -func TestSigstoreimpl_AddSkippedImage(t *testing.T) { - tests := []struct { - name string - skippedImages map[string]struct{} - imageID []string - want map[string]struct{} - }{ - { - name: "add skipped image to empty map", - imageID: []string{"sha256:sampleimagehash"}, - want: map[string]struct{}{ - "sha256:sampleimagehash": {}, - }, - }, - { - name: "add skipped image", - skippedImages: map[string]struct{}{ - "sha256:sampleimagehash1": {}, - }, - imageID: []string{"sha256:sampleimagehash"}, - want: map[string]struct{}{ - "sha256:sampleimagehash": {}, - "sha256:sampleimagehash1": {}, - }, - }, - { - name: "add a list of skipped images to empty map", - imageID: []string{"sha256:sampleimagehash", "sha256:sampleimagehash1"}, - want: map[string]struct{}{ - "sha256:sampleimagehash": {}, - "sha256:sampleimagehash1": {}, - }, - }, - { - name: "add a list of skipped images to a existing map", - skippedImages: map[string]struct{}{ - "sha256:sampleimagehash": {}, - }, - imageID: []string{"sha256:sampleimagehash1", "sha256:sampleimagehash2"}, - want: map[string]struct{}{ - "sha256:sampleimagehash": {}, - "sha256:sampleimagehash1": {}, - "sha256:sampleimagehash2": {}, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - sigstore := sigstoreImpl{ - skippedImages: 
tt.skippedImages, - } - sigstore.AddSkippedImages(tt.imageID) - require.Equal(t, tt.want, sigstore.skippedImages) - }) - } -} - -func TestSigstoreimpl_ClearSkipList(t *testing.T) { - tests := []struct { - skippedImages map[string]struct{} - name string - }{ - { - name: "clear single image in map", - skippedImages: map[string]struct{}{ - "sha256:sampleimagehash": {}, - }, - }, - { - name: "clear multiple images map", - skippedImages: map[string]struct{}{ - "sha256:sampleimagehash": {}, - "sha256:sampleimagehash1": {}, - }, - }, - { - name: "clear on empty map", - skippedImages: map[string]struct{}{}, - }, - { - name: "clear on nil map", - skippedImages: nil, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - sigstore := &sigstoreImpl{ - skippedImages: tt.skippedImages, - } - sigstore.ClearSkipList() - require.Empty(t, sigstore.skippedImages) - }) - } -} - -func TestSigstoreimpl_ValidateImage(t *testing.T) { - type fields struct { - verifyFunction verifyFunctionBinding - fetchImageManifestFunction fetchFunctionBinding - } - tests := []struct { - name string - fields fields - ref name.Reference - wantedFetchArguments fetchFunctionArguments - wantedVerifyArguments verifyFunctionArguments - wantedErr error - }{ - { - name: "validate image", - fields: fields{ - verifyFunction: createNilVerifyFunction(), - fetchImageManifestFunction: createFetchFunction(&remote.Descriptor{ - Manifest: []byte(`sometext`), - }, nil), - }, - ref: name.MustParseReference("example.com/sampleimage@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - wantedFetchArguments: fetchFunctionArguments{ - ref: name.MustParseReference("example.com/sampleimage@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: nil, - }, - wantedVerifyArguments: verifyFunctionArguments{}, - }, - { - name: "error on image manifest fetch", - fields: fields{ - verifyFunction: createNilVerifyFunction(), - fetchImageManifestFunction: 
createFetchFunction(nil, errors.New("fetch error 123")), - }, - ref: name.MustParseReference("example.com/sampleimage@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - wantedFetchArguments: fetchFunctionArguments{ - ref: name.MustParseReference("example.com/sampleimage@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: nil, - }, - wantedErr: errors.New("fetch error 123"), - }, - { - name: "nil image manifest fetch", - fields: fields{ - verifyFunction: createNilVerifyFunction(), - fetchImageManifestFunction: createFetchFunction(&remote.Descriptor{ - Manifest: nil, - }, nil), - }, - ref: name.MustParseReference("example.com/sampleimage@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - wantedFetchArguments: fetchFunctionArguments{ - ref: name.MustParseReference("example.com/sampleimage@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: nil, - }, - wantedErr: errors.New("manifest is empty"), - }, - { - name: "validate hash manifest", - fields: fields{ - verifyFunction: createNilVerifyFunction(), - fetchImageManifestFunction: createFetchFunction(&remote.Descriptor{ - Manifest: []byte("f0c62edf734ff52ee830c9eeef2ceefad94f7f089706d170f8d9dc64befb57cc"), - }, nil), - }, - ref: name.MustParseReference("example.com/sampleimage@sha256:f037cc8ec4cd38f95478773741fdecd48d721a527d19013031692edbf95fae69"), - wantedFetchArguments: fetchFunctionArguments{ - ref: name.MustParseReference("example.com/sampleimage@sha256:f037cc8ec4cd38f95478773741fdecd48d721a527d19013031692edbf95fae69"), - options: nil, - }, - wantedVerifyArguments: verifyFunctionArguments{}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fetchArguments := fetchFunctionArguments{} - verifyArguments := verifyFunctionArguments{} - sigstore := &sigstoreImpl{ - functionHooks: sigstoreFunctionHooks{ - verifyFunction: tt.fields.verifyFunction(t, &verifyArguments), - 
fetchImageManifestFunction: tt.fields.fetchImageManifestFunction(t, &fetchArguments), - }, - } - - err := sigstore.ValidateImage(tt.ref) - if tt.wantedErr != nil { - require.EqualError(t, err, tt.wantedErr.Error()) - } else { - require.NoError(t, err) - } - - require.Equal(t, tt.wantedFetchArguments, fetchArguments) - require.Equal(t, tt.wantedVerifyArguments, verifyArguments) - }) - } -} - -func TestSigstoreimpl_AddAllowedSubject(t *testing.T) { - tests := []struct { - name string - subjectAllowList map[string]map[string]struct{} - issuer string - subject string - want map[string]map[string]struct{} - }{ - { - name: "add allowed subject to nil map", - subjectAllowList: nil, - issuer: "issuer1", - subject: "spirex@example.com", - want: map[string]map[string]struct{}{ - "issuer1": {"spirex@example.com": struct{}{}}, - }, - }, - { - name: "add allowed subject to empty map", - subjectAllowList: map[string]map[string]struct{}{}, - issuer: "issuer1", - subject: "spirex@example.com", - want: map[string]map[string]struct{}{ - "issuer1": {"spirex@example.com": struct{}{}}, - }, - }, - { - name: "add allowed subject to existing map", - subjectAllowList: map[string]map[string]struct{}{ - "issuer1": { - "spirex1@example.com": struct{}{}, - "spirex2@example.com": struct{}{}, - "spirex3@example.com": struct{}{}, - "spirex5@example.com": struct{}{}, - }, - }, - issuer: "issuer1", - subject: "spirex4@example.com", - want: map[string]map[string]struct{}{ - "issuer1": { - "spirex1@example.com": struct{}{}, - "spirex2@example.com": struct{}{}, - "spirex3@example.com": struct{}{}, - "spirex4@example.com": struct{}{}, - "spirex5@example.com": struct{}{}, - }, - }, - }, - { - name: "add allowed subject to existing map with new issuer", - subjectAllowList: map[string]map[string]struct{}{ - "issuer1": { - "spirex1@example.com": struct{}{}, - "spirex2@example.com": struct{}{}, - "spirex3@example.com": struct{}{}, - "spirex5@example.com": struct{}{}, - }, - }, - - issuer: "issuer2", - 
subject: "spirex4@example.com", - - want: map[string]map[string]struct{}{ - "issuer1": { - "spirex1@example.com": struct{}{}, - "spirex2@example.com": struct{}{}, - "spirex3@example.com": struct{}{}, - "spirex5@example.com": struct{}{}, - }, - "issuer2": { - "spirex4@example.com": struct{}{}, - }, - }, - }, - { - name: "add existing allowed subject to existing map with new issuer", - subjectAllowList: map[string]map[string]struct{}{ - "issuer1": { - "spirex1@example.com": struct{}{}, - "spirex2@example.com": struct{}{}, - "spirex3@example.com": struct{}{}, - "spirex4@example.com": struct{}{}, - "spirex5@example.com": struct{}{}, - }, - }, - issuer: "issuer2", - subject: "spirex4@example.com", - want: map[string]map[string]struct{}{ - "issuer1": { - "spirex1@example.com": struct{}{}, - "spirex2@example.com": struct{}{}, - "spirex3@example.com": struct{}{}, - "spirex4@example.com": struct{}{}, - "spirex5@example.com": struct{}{}, - }, - "issuer2": { - "spirex4@example.com": struct{}{}, - }, - }, - }, - { - name: "add existing allowed subject to existing map with same issuer", - subjectAllowList: map[string]map[string]struct{}{ - "issuer1": { - "spirex1@example.com": struct{}{}, - "spirex2@example.com": struct{}{}, - "spirex3@example.com": struct{}{}, - "spirex4@example.com": struct{}{}, - "spirex5@example.com": struct{}{}, - }, - }, - issuer: "issuer1", - subject: "spirex4@example.com", - want: map[string]map[string]struct{}{ - "issuer1": { - "spirex1@example.com": struct{}{}, - "spirex2@example.com": struct{}{}, - "spirex3@example.com": struct{}{}, - "spirex4@example.com": struct{}{}, - "spirex5@example.com": struct{}{}, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - sigstore := &sigstoreImpl{ - subjectAllowList: tt.subjectAllowList, - } - sigstore.AddAllowedSubject(tt.issuer, tt.subject) - require.Equal(t, tt.want, sigstore.subjectAllowList) - }) - } -} - -func TestSigstoreimpl_ClearAllowedSubjects(t *testing.T) { - tests := 
[]struct { - name string - subjectAllowList map[string]map[string]struct{} - }{ - { - name: "clear existing map", - subjectAllowList: map[string]map[string]struct{}{ - "issuer1": { - "spirex1@example.com": struct{}{}, - "spirex2@example.com": struct{}{}, - "spirex3@example.com": struct{}{}, - "spirex4@example.com": struct{}{}, - "spirex5@example.com": struct{}{}, - }, - }, - }, - { - name: "clear map with multiple issuers", - - subjectAllowList: map[string]map[string]struct{}{ - "issuer1": { - "spirex1@example.com": struct{}{}, - "spirex2@example.com": struct{}{}, - "spirex3@example.com": struct{}{}, - "spirex4@example.com": struct{}{}, - "spirex5@example.com": struct{}{}, - }, - "issuer2": { - "spirex1@example.com": struct{}{}, - "spirex6@example.com": struct{}{}, - }, - }, - }, - { - name: "clear empty map", - subjectAllowList: map[string]map[string]struct{}{}, - }, - { - name: "clear nil map", - subjectAllowList: nil, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - sigstore := &sigstoreImpl{ - subjectAllowList: tt.subjectAllowList, - } - sigstore.ClearAllowedSubjects() - require.Empty(t, sigstore.subjectAllowList) - }) - } -} - -func TestSigstoreimpl_SelectorValuesFromSignature(t *testing.T) { - tests := []struct { - name string - signature oci.Signature - containerID string - subjectAllowList map[string][]string - want *SelectorsFromSignatures - wantedErr error - }{ - { - name: "selector from signature", - signature: signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "02c15a8d1735c65bb8ca86c716615d3c0d8beb87dc68ed88bb49192f90b184e2"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 2","key3": "value 3"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: 
"ewogICJzcGVjIjogewogICAgInNpZ25hdHVyZSI6IHsKICAgICAgImNvbnRlbnQiOiAiTUVVQ0lRQ3llbThHY3Iwc1BGTVA3ZlRYYXpDTjU3TmNONStNanhKdzlPbzB4MmVNK0FJZ2RnQlA5NkJPMVRlL05kYmpIYlVlYjBCVXllNmRlUmdWdFFFdjVObzVzbUE9IgogICAgfQogIH0KfQ==", - LogID: "samplelogID", - IntegratedTime: 12345, - }, - }, - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex@example.com"}, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, - }, - containerID: "000000", - subjectAllowList: map[string][]string{ - "issuer1": {"spirex@example.com"}, - }, - want: &SelectorsFromSignatures{ - Subject: "spirex@example.com", - Content: "MEUCIQCyem8Gcr0sPFMP7fTXazCN57NcN5+MjxJw9Oo0x2eM+AIgdgBP96BO1Te/NdbjHbUeb0BUye6deRgVtQEv5No5smA=", - LogID: "samplelogID", - IntegratedTime: "12345", - }, - }, - { - name: "selector from signature, empty subject", - signature: signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "02c15a8d1735c65bb8ca86c716615d3c0d8beb87dc68ed88bb49192f90b184e2"},"type": "some type"},"optional": {"subject": "","key2": "value 2","key3": "value 3"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "ewogICJzcGVjIjogewogICAgInNpZ25hdHVyZSI6IHsKICAgICAgImNvbnRlbnQiOiAiTUVVQ0lRQ3llbThHY3Iwc1BGTVA3ZlRYYXpDTjU3TmNONStNanhKdzlPbzB4MmVNK0FJZ2RnQlA5NkJPMVRlL05kYmpIYlVlYjBCVXllNmRlUmdWdFFFdjVObzVzbUE9IgogICAgfQogIH0KfQ==", - LogID: "samplelogID", - IntegratedTime: 12345, - }, - }, - cert: &x509.Certificate{ - EmailAddresses: nil, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, - }, - containerID: "111111", - subjectAllowList: nil, - want: nil, - wantedErr: fmt.Errorf("error getting signature subject: empty subject"), - }, - { - name: "selector from signature, not in allowlist", - signature: signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": 
"docker-registry.com/some/image"},"image": {"docker-manifest-digest": "02c15a8d1735c65bb8ca86c716615d3c0d8beb87dc68ed88bb49192f90b184e2"},"type": "some type"},"optional": {"subject": "spirex1@example.com","key2": "value 2","key3": "value 3"}}`), - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex1@example.com"}, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, - }, - containerID: "222222", - subjectAllowList: map[string][]string{ - "issuer1": {"spirex2@example.com"}, - }, - want: nil, - wantedErr: fmt.Errorf("subject %q not allowed for issuer %q", "spirex1@example.com", "issuer1"), - }, - { - name: "selector from signature, allowedlist enabled, in allowlist", - signature: signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "02c15a8d1735c65bb8ca86c716615d3c0d8beb87dc68ed88bb49192f90b184e2"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 2","key3": "value 3"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "ewogICJzcGVjIjogewogICAgInNpZ25hdHVyZSI6IHsKICAgICAgImNvbnRlbnQiOiAiTUVVQ0lRQ3llbThHY3Iwc1BGTVA3ZlRYYXpDTjU3TmNONStNanhKdzlPbzB4MmVNK0FJZ2RnQlA5NkJPMVRlL05kYmpIYlVlYjBCVXllNmRlUmdWdFFFdjVObzVzbUE9IgogICAgfQogIH0KfQ==", - LogID: "samplelogID", - IntegratedTime: 12345, - }, - }, - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex@example.com"}, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, - }, - containerID: "333333", - subjectAllowList: map[string][]string{ - "issuer1": {"spirex@example.com"}, - }, - want: &SelectorsFromSignatures{ - Subject: "spirex@example.com", - Content: "MEUCIQCyem8Gcr0sPFMP7fTXazCN57NcN5+MjxJw9Oo0x2eM+AIgdgBP96BO1Te/NdbjHbUeb0BUye6deRgVtQEv5No5smA=", - LogID: "samplelogID", - IntegratedTime: "12345", - }, - }, - { - name: "selector from signature, allowedlist enabled, in 
allowlist, empty content", - signature: signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "02c15a8d1735c65bb8ca86c716615d3c0d8beb87dc68ed88bb49192f90b184e2"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 2","key3": "value 3"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "ewogICJzcGVjIjogewogICAgInNpZ25hdHVyZSI6IHsKICAgICAgImNvbnRlbnQiOiAiIgogICAgfQogIH0KfQ==", - LogID: "samplelogID", - IntegratedTime: 12345, - }, - }, - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex@example.com"}, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, - }, - containerID: "444444", - subjectAllowList: map[string][]string{ - "issuer1": {"spirex@example.com"}, - }, - want: nil, - wantedErr: fmt.Errorf("error getting signature content: bundle payload body has no signature content"), - }, - { - name: "selector from signature, nil bundle", - signature: nilBundleSignature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "02c15a8d1735c65bb8ca86c716615d3c0d8beb87dc68ed88bb49192f90b184e2"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 2","key3": "value 3"}}`), - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex@example.com"}, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, - }, - containerID: "555555", - subjectAllowList: map[string][]string{ - "issuer1": {"spirex@example.com"}, - }, - want: nil, - wantedErr: fmt.Errorf("error getting signature bundle: no bundle test"), - }, - { - name: "selector from signature, bundle payload body is not a string", - signature: signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": 
"02c15a8d1735c65bb8ca86c716615d3c0d8beb87dc68ed88bb49192f90b184e2"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 2","key3": "value 3"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: 42, - LogID: "samplelogID", - IntegratedTime: 12345, - }, - }, - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex@example.com"}, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, - }, - containerID: "000000", - subjectAllowList: map[string][]string{ - "issuer1": {"spirex@example.com"}, - }, - want: nil, - wantedErr: fmt.Errorf("error getting signature content: expected payload body to be a string but got int instead"), - }, - { - name: "selector from signature, bundle payload body is not valid base64", - signature: signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "02c15a8d1735c65bb8ca86c716615d3c0d8beb87dc68ed88bb49192f90b184e2"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 2","key3": "value 3"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "abc..........def", - LogID: "samplelogID", - IntegratedTime: 12345, - }, - }, - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex@example.com"}, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, - }, - containerID: "000000", - subjectAllowList: map[string][]string{ - "issuer1": {"spirex@example.com"}, - }, - want: nil, - wantedErr: fmt.Errorf("error getting signature content: illegal base64 data at input byte 3"), - }, - { - name: "selector from signature, bundle payload body has no signature content", - signature: signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": 
"02c15a8d1735c65bb8ca86c716615d3c0d8beb87dc68ed88bb49192f90b184e2"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 2","key3": "value 3"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "ewogICAgInNwZWMiOiB7CiAgICAgICJzaWduYXR1cmUiOiB7CiAgICAgIH0KICAgIH0KfQ==", - LogID: "samplelogID", - IntegratedTime: 12345, - }, - }, - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex@example.com"}, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, - }, - containerID: "000000", - subjectAllowList: map[string][]string{ - "issuer1": {"spirex@example.com"}, - }, - want: nil, - wantedErr: fmt.Errorf("error getting signature content: bundle payload body has no signature content"), - }, - { - name: "selector from signature, bundle payload body signature content is empty", - signature: signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "02c15a8d1735c65bb8ca86c716615d3c0d8beb87dc68ed88bb49192f90b184e2"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 2","key3": "value 3"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "ewogICAgInNwZWMiOiB7CiAgICAgICAgInNpZ25hdHVyZSI6IHsKICAgICAgICAiY29udGVudCI6ICIiCiAgICAgICAgfQogICAgfQp9", - LogID: "samplelogID", - IntegratedTime: 12345, - }, - }, - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex@example.com"}, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, - }, - containerID: "000000", - subjectAllowList: map[string][]string{ - "issuer1": {"spirex@example.com"}, - }, - want: nil, - wantedErr: fmt.Errorf("error getting signature content: bundle payload body has no signature content"), - }, - { - name: "selector from signature, bundle payload body is not a valid JSON", - signature: signature{ - payload: []byte(`{"critical": 
{"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "02c15a8d1735c65bb8ca86c716615d3c0d8beb87dc68ed88bb49192f90b184e2"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 2","key3": "value 3"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "ewogICJzcGVjIjosLCB7CiAgICAic2lnbmF0dXJlIjogewogICAgICAiY29udGVudCI6ICJNRVVDSVFDeWVtOEdjcjBzUEZNUDdmVFhhekNONTdOY041K01qeEp3OU9vMHgyZU0rQUlnZGdCUDk2Qk8xVGUvTmRiakhiVWViMEJVeWU2ZGVSZ1Z0UUV2NU5vNXNtQT0iCiAgICB9CiAgfQp9", - LogID: "samplelogID", - IntegratedTime: 12345, - }, - }, - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex@example.com"}, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, - }, - containerID: "000000", - subjectAllowList: map[string][]string{ - "issuer1": {"spirex@example.com"}, - }, - want: nil, - wantedErr: fmt.Errorf("error getting signature content: failed to parse bundle body: invalid character ',' looking for beginning of value"), - }, - { - name: "selector from signature, empty signature array", - signature: nil, - containerID: "000000", - want: nil, - wantedErr: errors.New("error getting signature subject: signature is nil"), - }, - { - name: "selector from signature, single image signature, no payload", - signature: noPayloadSignature{}, - containerID: "000000", - want: nil, - wantedErr: errors.New("error getting signature subject: no payload test"), - }, - { - name: "selector from signature, single image signature, no certs", - signature: &noCertSignature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "some digest"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 2","key3": "value 3"}}`), - }, - containerID: "000000", - want: nil, - wantedErr: errors.New("error getting signature subject: failed to access signature 
certificate: no cert test"), - }, - { - name: "selector from signature, single image signature,garbled subject in signature", - signature: &signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "some digest"},"type": "some type"},"optional": {"subject": "s\\\\||as\0\0aasdasd/....???/.>wd12<><,,,><{}{pirex@example.com","key2": "value 2","key3": "value 3"}}`), - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex@example.com"}, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, - }, - containerID: "000000", - want: nil, - wantedErr: errors.New("error getting signature subject: invalid character '0' in string escape code"), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - sigstore := &sigstoreImpl{ - subjectAllowList: nil, - logger: hclog.Default(), - } - for issuer, subjects := range tt.subjectAllowList { - for _, subject := range subjects { - sigstore.AddAllowedSubject(issuer, subject) - } - } - got, err := sigstore.SelectorValuesFromSignature(tt.signature) - assert.Equal(t, tt.want, got) - if tt.wantedErr != nil { - require.EqualError(t, err, tt.wantedErr.Error()) - return - } - require.NoError(t, err) - }) - } -} - -func TestSigstoreimpl_AttestContainerSignatures(t *testing.T) { - type fields struct { - functionBindings sigstoreFunctionBindings - skippedImages map[string]struct{} - rekorURL url.URL - subjectAllowList map[string][]string - } - - rootCerts, err := fulcio.GetRoots() - require.NoError(t, err) - intermediateCerts, err := fulcio.GetIntermediates() - require.NoError(t, err) - - defaultCheckOpts := &cosign.CheckOpts{ - RekorClient: rekor.NewHTTPClientWithConfig(nil, rekor.DefaultTransportConfig().WithBasePath(rekorDefaultURL().Path).WithHost(rekorDefaultURL().Host)), - RootCerts: rootCerts, - IntermediateCerts: intermediateCerts, - } - var emptyURLCheckOpts *cosign.CheckOpts - 
emptyError := errors.New("rekor URL host is empty") - - tests := []struct { - name string - fields fields - status corev1.ContainerStatus - wantedFetchArguments fetchFunctionArguments - wantedVerifyArguments verifyFunctionArguments - wantedCheckOptsArguments checkOptsFunctionArguments - want []string - wantedErr error - }{ - { - name: "Attest image with signature", - fields: fields{ - functionBindings: sigstoreFunctionBindings{ - verifyBinding: createVerifyFunction([]oci.Signature{ - signature{ - payload: []byte(`{"critical": {"identity": {"docker-reference": "docker-registry.com/some/image"},"image": {"docker-manifest-digest": "02c15a8d1735c65bb8ca86c716615d3c0d8beb87dc68ed88bb49192f90b184e2"},"type": "some type"},"optional": {"subject": "spirex@example.com","key2": "value 2","key3": "value 3"}}`), - bundle: &bundle.RekorBundle{ - Payload: bundle.RekorPayload{ - Body: "ewogICJzcGVjIjogewogICAgInNpZ25hdHVyZSI6IHsKICAgICAgImNvbnRlbnQiOiAiTUVVQ0lRQ3llbThHY3Iwc1BGTVA3ZlRYYXpDTjU3TmNONStNanhKdzlPbzB4MmVNK0FJZ2RnQlA5NkJPMVRlL05kYmpIYlVlYjBCVXllNmRlUmdWdFFFdjVObzVzbUE9IgogICAgfQogIH0KfQ==", - LogID: "samplelogID", - IntegratedTime: 12345, - }, - }, - cert: &x509.Certificate{ - EmailAddresses: []string{"spirex@example.com"}, - Extensions: []pkix.Extension{{ - Id: oidcIssuerOID, - Value: []byte(`issuer1`), - }}, - }, - }, - }, true, nil), - fetchBinding: createFetchFunction(&remote.Descriptor{ - Manifest: []byte("sometext"), - }, nil), - checkOptsBinding: createCheckOptsFunction(defaultCheckOpts, nil), - }, - rekorURL: rekorDefaultURL(), - subjectAllowList: map[string][]string{ - "issuer1": {"spirex@example.com"}, - }, - }, - status: corev1.ContainerStatus{ - Image: "spire-agent-sigstore-1", - ImageID: "docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505", - ContainerID: "000000", - }, - wantedFetchArguments: fetchFunctionArguments{ - ref: 
name.MustParseReference("docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: nil, - }, - wantedVerifyArguments: verifyFunctionArguments{ - context: context.Background(), - ref: name.MustParseReference("docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: defaultCheckOpts, - }, - wantedCheckOptsArguments: checkOptsFunctionArguments{ - url: rekorDefaultURL(), - }, - want: []string{ - "000000:image-signature-subject:spirex@example.com", "000000:image-signature-content:MEUCIQCyem8Gcr0sPFMP7fTXazCN57NcN5+MjxJw9Oo0x2eM+AIgdgBP96BO1Te/NdbjHbUeb0BUye6deRgVtQEv5No5smA=", "000000:image-signature-logid:samplelogID", "000000:image-signature-integrated-time:12345", "sigstore-validation:passed", - }, - }, - { - name: "Attest skipped image", - fields: fields{ - functionBindings: sigstoreFunctionBindings{ - verifyBinding: createNilVerifyFunction(), - fetchBinding: createNilFetchFunction(), - checkOptsBinding: createNilCheckOptsFunction(), - }, - skippedImages: map[string]struct{}{ - "docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505": {}, - }, - rekorURL: rekorDefaultURL(), - }, - status: corev1.ContainerStatus{ - Image: "spire-agent-sigstore-2", - ImageID: "docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505", - ContainerID: "111111", - }, - want: []string{ - "sigstore-validation:passed", - }, - }, - { - name: "Attest image with no signature", - fields: fields{ - functionBindings: sigstoreFunctionBindings{ - verifyBinding: createVerifyFunction(nil, true, fmt.Errorf("no signature found")), - fetchBinding: createFetchFunction(&remote.Descriptor{ - Manifest: []byte("sometext"), - }, nil), - checkOptsBinding: createCheckOptsFunction(defaultCheckOpts, nil), - }, - rekorURL: rekorDefaultURL(), - }, - status: corev1.ContainerStatus{ - Image: 
"spire-agent-sigstore-3", - ImageID: "docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505", - ContainerID: "222222", - }, - wantedFetchArguments: fetchFunctionArguments{ - ref: name.MustParseReference("docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: nil, - }, - wantedVerifyArguments: verifyFunctionArguments{ - context: context.Background(), - ref: name.MustParseReference("docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: defaultCheckOpts, - }, - wantedCheckOptsArguments: checkOptsFunctionArguments{ - url: rekorDefaultURL(), - }, - want: nil, - wantedErr: fmt.Errorf("error verifying signature: %w", errors.New("no signature found")), - }, - { - name: "Attest image with empty rekorURL", - fields: fields{ - functionBindings: sigstoreFunctionBindings{ - verifyBinding: createNilVerifyFunction(), - fetchBinding: createFetchFunction(&remote.Descriptor{ - Manifest: []byte("sometext"), - }, nil), - checkOptsBinding: createCheckOptsFunction(emptyURLCheckOpts, emptyError), - }, - rekorURL: url.URL{}, - }, - status: corev1.ContainerStatus{ - Image: "spire-agent-sigstore-3", - ImageID: "docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505", - ContainerID: "222222", - }, - wantedFetchArguments: fetchFunctionArguments{ - ref: name.MustParseReference("docker-registry.com/some/image@sha256:5fb2054478353fd8d514056d1745b3a9eef066deadda4b90967af7ca65ce6505"), - options: nil, - }, - wantedCheckOptsArguments: checkOptsFunctionArguments{ - url: url.URL{}, - }, - want: nil, - wantedErr: fmt.Errorf("could not create cosign check options: %w", emptyError), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt := tt - fetchArguments := fetchFunctionArguments{} - verifyArguments := verifyFunctionArguments{} - checkOptsArguments := 
checkOptsFunctionArguments{} - sigstore := &sigstoreImpl{ - functionHooks: sigstoreFunctionHooks{ - verifyFunction: tt.fields.functionBindings.verifyBinding(t, &verifyArguments), - fetchImageManifestFunction: tt.fields.functionBindings.fetchBinding(t, &fetchArguments), - checkOptsFunction: tt.fields.functionBindings.checkOptsBinding(t, &checkOptsArguments), - }, - skippedImages: tt.fields.skippedImages, - rekorURL: tt.fields.rekorURL, - sigstorecache: NewCache(maximumAmountCache), - logger: hclog.Default(), - } - - for issuer, subjects := range tt.fields.subjectAllowList { - for _, subject := range subjects { - sigstore.AddAllowedSubject(issuer, subject) - } - } - - got, err := sigstore.AttestContainerSignatures(context.Background(), &tt.status) - - if tt.wantedErr != nil { - require.EqualError(t, err, tt.wantedErr.Error()) - } else { - require.NoError(t, err) - } - - require.Equal(t, tt.want, got) - require.Equal(t, tt.wantedFetchArguments, fetchArguments) - require.Equal(t, tt.wantedVerifyArguments, verifyArguments) - require.Equal(t, tt.wantedCheckOptsArguments, checkOptsArguments) - }) - } -} - -func TestSigstoreimpl_SetRekorURL(t *testing.T) { - tests := []struct { - name string - rekorURL url.URL - rekorURLArg string - want url.URL - wantedErr error - }{ - { - name: "SetRekorURL", - rekorURL: url.URL{}, - rekorURLArg: "https://rekor.com", - want: url.URL{ - Scheme: "https", - Host: "rekor.com", - }, - }, - { - name: "SetRekorURL with empty url", - rekorURL: url.URL{ - Scheme: "https", - Host: "non.empty.url", - }, - rekorURLArg: "", - want: url.URL{ - Scheme: "https", - Host: "non.empty.url", - }, - wantedErr: fmt.Errorf("rekor URL is empty"), - }, - { - name: "SetRekorURL with invalid URL", - rekorURL: url.URL{}, - rekorURLArg: "http://invalid.{{}))}.url.com", // invalid url - want: url.URL{}, - wantedErr: fmt.Errorf("failed parsing rekor URI: parse %q: invalid character %q in host name", "http://invalid.{{}))}.url.com", "{"), - }, - { - name: "SetRekorURL 
with empty host url", - rekorURL: url.URL{}, - rekorURLArg: "path-no-host", // URI parser uses this as path, not host - want: url.URL{}, - wantedErr: fmt.Errorf("host is required on rekor URL"), - }, - { - name: "SetRekorURL with invalid URL scheme", - rekorURL: url.URL{}, - rekorURLArg: "abc://invalid.scheme.com", // invalid scheme - want: url.URL{}, - wantedErr: fmt.Errorf("invalid rekor URL Scheme %q", "abc"), - }, - { - name: "SetRekorURL with empty URL scheme", - rekorURL: url.URL{}, - rekorURLArg: "//no.scheme.com/path", // empty scheme - want: url.URL{}, - wantedErr: fmt.Errorf("invalid rekor URL Scheme \"\""), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - sigstore := &sigstoreImpl{ - rekorURL: tt.rekorURL, - } - err := sigstore.SetRekorURL(tt.rekorURLArg) - if tt.wantedErr != nil { - require.EqualError(t, err, tt.wantedErr.Error()) - } else { - require.NoError(t, err) - } - require.Equal(t, tt.want, sigstore.rekorURL) - }) - } -} - -type ociSignature = oci.Signature - -type signature struct { - ociSignature - - payload []byte - cert *x509.Certificate - bundle *bundle.RekorBundle -} - -func (s signature) Payload() ([]byte, error) { - return s.payload, nil -} - -func (s signature) Cert() (*x509.Certificate, error) { - return s.cert, nil -} - -func (s signature) Bundle() (*bundle.RekorBundle, error) { - return s.bundle, nil -} - -type noPayloadSignature signature - -func (noPayloadSignature) Payload() ([]byte, error) { - return nil, errors.New("no payload test") -} - -type nilBundleSignature signature - -func (s nilBundleSignature) Payload() ([]byte, error) { - return s.payload, nil -} - -func (s nilBundleSignature) Cert() (*x509.Certificate, error) { - return s.cert, nil -} - -func (s nilBundleSignature) Bundle() (*bundle.RekorBundle, error) { - return nil, fmt.Errorf("no bundle test") -} - -type noCertSignature signature - -func (s noCertSignature) Payload() ([]byte, error) { - return s.payload, nil -} - -func 
(noCertSignature) Cert() (*x509.Certificate, error) { - return nil, errors.New("no cert test") -} - -func createVerifyFunction(returnSignatures []oci.Signature, returnBundleVerified bool, returnError error) verifyFunctionBinding { - bindVerifyArgumentsFunction := func(t require.TestingT, verifyArguments *verifyFunctionArguments) verifyFunctionType { - newVerifyFunction := func(context context.Context, ref name.Reference, co *cosign.CheckOpts) ([]oci.Signature, bool, error) { - verifyArguments.context = context - verifyArguments.ref = ref - verifyArguments.options = co - return returnSignatures, returnBundleVerified, returnError - } - return newVerifyFunction - } - return bindVerifyArgumentsFunction -} - -func createNilVerifyFunction() verifyFunctionBinding { - bindVerifyArgumentsFunction := func(t require.TestingT, verifyArguments *verifyFunctionArguments) verifyFunctionType { - failFunction := func(context context.Context, ref name.Reference, co *cosign.CheckOpts) ([]oci.Signature, bool, error) { - require.FailNow(t, "nil verify function should not be called") - return nil, false, nil - } - return failFunction - } - return bindVerifyArgumentsFunction -} - -func createFetchFunction(returnDescriptor *remote.Descriptor, returnError error) fetchFunctionBinding { - bindFetchArgumentsFunction := func(t require.TestingT, fetchArguments *fetchFunctionArguments) fetchImageManifestFunctionType { - newFetchFunction := func(ref name.Reference, options ...remote.Option) (*remote.Descriptor, error) { - fetchArguments.ref = ref - fetchArguments.options = options - return returnDescriptor, returnError - } - return newFetchFunction - } - return bindFetchArgumentsFunction -} - -func createNilFetchFunction() fetchFunctionBinding { - bindFetchArgumentsFunction := func(t require.TestingT, fetchArguments *fetchFunctionArguments) fetchImageManifestFunctionType { - failFunction := func(ref name.Reference, options ...remote.Option) (*remote.Descriptor, error) { - require.FailNow(t, "nil 
fetch function should not be called") - return nil, nil - } - return failFunction - } - return bindFetchArgumentsFunction -} - -func createCheckOptsFunction(returnCheckOpts *cosign.CheckOpts, returnErr error) checkOptsFunctionBinding { - bindCheckOptsArgumentsFunction := func(t require.TestingT, checkOptsArguments *checkOptsFunctionArguments) checkOptsFunctionType { - newCheckOptsFunction := func(ctx context.Context, url url.URL, enforceSCT bool, allowedSubjects map[string]map[string]struct{}) (*cosign.CheckOpts, error) { - checkOptsArguments.url = url - return returnCheckOpts, returnErr - } - return newCheckOptsFunction - } - return bindCheckOptsArgumentsFunction -} - -func createNilCheckOptsFunction() checkOptsFunctionBinding { - bindCheckOptsArgumentsFunction := func(t require.TestingT, checkOptsArguments *checkOptsFunctionArguments) checkOptsFunctionType { - failFunction := func(ctx context.Context, url url.URL, enforceSCT bool, allowedSubjects map[string]map[string]struct{}) (*cosign.CheckOpts, error) { - require.FailNow(t, "nil check opts function should not be called") - return nil, fmt.Errorf("nil check opts function should not be called") - } - return failFunction - } - return bindCheckOptsArgumentsFunction -} - -func rekorDefaultURL() url.URL { - return url.URL{ - Scheme: rekor.DefaultSchemes[0], - Host: rekor.DefaultHost, - Path: rekor.DefaultBasePath, - } -} - -type sigstoreFunctionBindings struct { - verifyBinding verifyFunctionBinding - fetchBinding fetchFunctionBinding - checkOptsBinding checkOptsFunctionBinding -} - -type checkOptsFunctionArguments struct { - url url.URL -} - -type checkOptsFunctionBinding func(require.TestingT, *checkOptsFunctionArguments) checkOptsFunctionType - -type fetchFunctionArguments struct { - ref name.Reference - options []remote.Option -} - -type fetchFunctionBinding func(require.TestingT, *fetchFunctionArguments) fetchImageManifestFunctionType - -type verifyFunctionArguments struct { - context context.Context - ref 
name.Reference - options *cosign.CheckOpts -} - -type verifyFunctionBinding func(require.TestingT, *verifyFunctionArguments) verifyFunctionType diff --git a/pkg/agent/plugin/workloadattestor/k8s/sigstore/sigstorecache.go b/pkg/agent/plugin/workloadattestor/k8s/sigstore/sigstorecache.go deleted file mode 100644 index debbf14f51a..00000000000 --- a/pkg/agent/plugin/workloadattestor/k8s/sigstore/sigstorecache.go +++ /dev/null @@ -1,87 +0,0 @@ -//go:build !windows - -package sigstore - -import ( - "container/list" - "sync" -) - -// Item represents a key-value pair -type Item struct { - Key string - Value []SelectorsFromSignatures -} - -// Cache defines the behaviors of our cache -type Cache interface { - GetSignature(key string) *Item - PutSignature(Item) -} - -// Map for signatures is created -type MapItem struct { - element *list.Element - item *Item -} - -// cacheImpl implements Cache interface -type cacheImpl struct { - size int - items *list.List - mutex sync.RWMutex - itemsMap map[string]MapItem -} - -// NewCache creates and returns a new cache -func NewCache(maximumAmountCache int) Cache { - return &cacheImpl{ - size: maximumAmountCache, - items: list.New(), - mutex: sync.RWMutex{}, - itemsMap: make(map[string]MapItem), - } -} - -// GetSignature returns an existing item from the cache. -// Get also moves the existing item to the front of the items list to indicate that the existing item is recently used. -func (c *cacheImpl) GetSignature(key string) *Item { - c.mutex.RLock() - defer c.mutex.RUnlock() - - e, ok := c.itemsMap[key] - if !ok { - return nil - } - - c.items.MoveToFront(e.element) - - return e.item -} - -// PutSignature puts a new item into the cache. -// Put removes the least recently used item from the items list when the cache is full. -// Put pushes the new item to the front of the items list to indicate that the new item is recently used. 
-func (c *cacheImpl) PutSignature(i Item) { - c.mutex.Lock() - defer c.mutex.Unlock() - - e, ok := c.itemsMap[i.Key] - if ok { - c.items.Remove(e.element) - c.itemsMap[i.Key] = MapItem{ - element: c.items.PushFront(i.Key), - item: &i, - } - return - } - if c.items.Len() >= c.size { - removed := c.items.Remove(c.items.Back()) - delete(c.itemsMap, removed.(string)) - } - - c.itemsMap[i.Key] = MapItem{ - element: c.items.PushFront(i.Key), - item: &i, - } -} diff --git a/pkg/agent/plugin/workloadattestor/k8s/sigstore/sigstorecache_test.go b/pkg/agent/plugin/workloadattestor/k8s/sigstore/sigstorecache_test.go deleted file mode 100644 index c90824cb364..00000000000 --- a/pkg/agent/plugin/workloadattestor/k8s/sigstore/sigstorecache_test.go +++ /dev/null @@ -1,541 +0,0 @@ -//go:build !windows - -package sigstore - -import ( - "container/list" - "sync" - "testing" - - "github.com/stretchr/testify/require" -) - -var ( - selectors1 = Item{ - Key: "signature1", - Value: []SelectorsFromSignatures{ - { - Subject: "spirex1@example.com", - Content: "content1", - LogID: "1111111111111111", - IntegratedTime: "1111111111111111", - }, - }, - } - - selectors2 = Item{ - Key: "signature2", - Value: []SelectorsFromSignatures{ - { - Subject: "spirex2@example.com", - Content: "content2", - LogID: "2222222222222222", - IntegratedTime: "2222222222222222", - }, - }, - } - - selectors3 = Item{ - Key: "signature3", - Value: []SelectorsFromSignatures{ - { - Subject: "spirex3@example.com", - Content: "content3", - LogID: "3333333333333333", - IntegratedTime: "3333333333333333", - }, - }, - } - - selectors3Updated = Item{ - Key: "signature3", - Value: []SelectorsFromSignatures{ - { - Subject: "spirex3@example.com", - Content: "content4", - LogID: "4444444444444444", - IntegratedTime: "4444444444444444", - }, - }, - } - - selectors2Updated = Item{ - Key: "signature2", - Value: []SelectorsFromSignatures{ - { - Subject: "spirex2@example.com", - Content: "content5", - LogID: "5555555555555555", - 
IntegratedTime: "5555555555555555", - }, - }, - } -) - -func TestNewCache(t *testing.T) { - want := &cacheImpl{ - size: 3, - items: list.New(), - mutex: sync.RWMutex{}, - itemsMap: make(map[string]MapItem), - } - got := NewCache(3) - require.Equal(t, want, got, "NewCache() = %v, want %v", got, want) -} - -func TestCacheimpl_GetSignature(t *testing.T) { - m, items := makeMapAndList(&selectors1, &selectors2) - cacheInstance := &cacheImpl{ - size: 3, - items: items, - mutex: sync.RWMutex{}, - itemsMap: m, - } - - caseInOrderList := list.New() - caseInOrderList.PushFront(selectors1.Key) - caseInOrderList.PushFront(selectors2.Key) - - caseReorderList := list.New() - caseReorderList.PushFront(selectors2.Key) - caseReorderList.PushFront(selectors1.Key) - - tests := []struct { - name string - want *Item - key string - errorMessage string - wantedMap map[string]MapItem - wantedList *list.List - }{ - { - name: "Non existing entry", - want: nil, - key: selectors3.Key, - errorMessage: "A non-existing item's key should return a nil item.", - wantedMap: map[string]MapItem{ - "signature1": {item: &selectors1, element: m[selectors1.Key].element}, - "signature2": {item: &selectors2, element: m[selectors2.Key].element}, - }, - wantedList: caseInOrderList, - }, - { - name: "Existing entry", - want: &selectors2, - key: selectors2.Key, - wantedMap: map[string]MapItem{ - "signature1": {item: &selectors1, element: m[selectors1.Key].element}, - "signature2": {item: &selectors2, element: m[selectors2.Key].element}, - }, - wantedList: caseInOrderList, - }, - { - name: "Existing entry, reorder on get", - want: &selectors1, - key: selectors1.Key, - wantedMap: map[string]MapItem{ - "signature1": {item: &selectors1, element: m[selectors1.Key].element}, - "signature2": {item: &selectors2, element: m[selectors2.Key].element}, - }, - wantedList: caseReorderList, - }, - } - - for _, tt := range tests { - got := cacheInstance.GetSignature(tt.key) - require.Equal(t, got, tt.want, "%v Got: %v Want: 
%v", tt.errorMessage, got, tt.want) - require.Equal(t, tt.wantedList, cacheInstance.items, "Lists are different Got: %v Want: %v", cacheInstance.items, tt.wantedList) - require.Equal(t, tt.wantedMap, cacheInstance.itemsMap, "Maps are different Got: %v Want: %v", cacheInstance.itemsMap, tt.wantedMap) - } -} - -func TestCacheimpl_PutSignature(t *testing.T) { - mapReorder, listReorder := makeMapAndList(&selectors2, &selectors3) - mapAddNew, listAddNew := makeMapAndList(&selectors3, &selectors2, &selectors1) - mapUpdate, listUpdate := makeMapAndList(&selectors3, &selectors2Updated) - mapReorderUpdate, listReorderUpdate := makeMapAndList(&selectors2, &selectors3Updated) - tests := []struct { - name string - item *Item - wantLength int - wantKey string - wantValue *Item - wantMap map[string]MapItem - wantList *list.List - }{ - { - name: "Put existing element", - item: &selectors3, - wantLength: 2, - wantKey: selectors3.Key, - wantValue: &selectors3, - wantMap: mapReorder, - wantList: listReorder, - }, - { - name: "Put new element", - item: &selectors1, - wantLength: 3, - wantKey: selectors1.Key, - wantValue: &selectors1, - wantMap: mapAddNew, - wantList: listAddNew, - }, - { - name: "Update entry", - item: &selectors2Updated, - wantLength: 2, - wantKey: selectors2.Key, - wantValue: &selectors2Updated, - wantMap: mapUpdate, - wantList: listUpdate, - }, - { - name: "Update entry, reorder", - item: &selectors3Updated, - wantLength: 2, - wantKey: selectors3.Key, - wantValue: &selectors3Updated, - wantMap: mapReorderUpdate, - wantList: listReorderUpdate, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - testMap, testItems := makeMapAndList(&selectors3, &selectors2) - cacheInstance := cacheImpl{ - size: 10, - items: testItems, - mutex: sync.RWMutex{}, - itemsMap: testMap, - } - cacheInstance.PutSignature(*tt.item) - - gotItem, present := testMap[tt.wantKey] - require.True(t, present, "key not found") - require.Equal(t, tt.wantValue, 
gotItem.item, "Value different than expected. \nGot: %v \nWant:%v", gotItem.item, tt.wantValue) - require.Equal(t, tt.wantLength, testItems.Len(), "List length different than expected. \nGot: %v \nWant:%v", testItems.Len(), tt.wantLength) - require.Equal(t, tt.wantList, testItems, "Lists are different Got: %v Want: %v", testItems, tt.wantList) - require.Equal(t, tt.wantMap, testMap, "Maps are different Got: %v Want: %v", testMap, tt.wantMap) - }) - } -} - -func TestCacheimpl_CheckOverflowAndUpdates(t *testing.T) { - m := make(map[string]MapItem) - items := list.New() - - cacheInstance := &cacheImpl{ - size: 2, - items: items, - mutex: sync.RWMutex{}, - itemsMap: m, - } - - putSteps1 := []struct { - name string - item *Item - wantLength int - wantKey string - wantValue *Item - wantHeadKey string - }{ - { - name: "Put first element", - item: &selectors1, - wantLength: 1, - wantKey: selectors1.Key, - wantValue: &selectors1, - wantHeadKey: selectors1.Key, - }, - { - name: "Put first element again", - item: &selectors1, - wantLength: 1, - wantKey: selectors1.Key, - wantValue: &selectors1, - wantHeadKey: selectors1.Key, - }, - { - name: "Put second element", - item: &selectors2, - wantLength: 2, - wantKey: selectors2.Key, - wantValue: &selectors2, - wantHeadKey: selectors2.Key, - }, - { - name: "Put third element, Overflow cache", - item: &selectors3, - wantLength: 2, - wantKey: selectors3.Key, - wantValue: &selectors3, - wantHeadKey: selectors3.Key, - }, - { - name: "Update entry", - item: &selectors3Updated, - wantLength: 2, - wantKey: selectors3.Key, - wantValue: &selectors3Updated, - wantHeadKey: selectors3.Key, - }, - { - name: "Put second element, again", - item: &selectors2, - wantLength: 2, - wantKey: selectors2.Key, - wantValue: &selectors2, - wantHeadKey: selectors2.Key, - }, - } - getSteps1 := []struct { - name string - key string - item *Item - wantLength int - wantValue *Item - wantHeadKey string - }{ - { - name: "Get first element", - key: selectors1.Key, - 
item: nil, - wantLength: 2, - wantHeadKey: selectors2.Key, - }, - { - name: "Get third element", - key: selectors3.Key, - item: &selectors3Updated, - wantLength: 2, - wantHeadKey: selectors3.Key, - }, - { - name: "Get first element, after third element was accessed", - key: selectors1.Key, - item: nil, - wantLength: 2, - wantHeadKey: selectors3.Key, - }, - { - name: "Get second element", - key: selectors2.Key, - item: &selectors2, - wantLength: 2, - wantValue: &selectors2, - wantHeadKey: selectors2.Key, - }, - } - - putSteps2 := []struct { - name string - item *Item - wantLength int - wantKey string - wantValue *Item - wantHeadKey string - }{ - { - name: "Put first element again, overflow cache", - item: &selectors1, - wantLength: 2, - wantKey: selectors1.Key, - wantValue: &selectors1, - wantHeadKey: selectors1.Key, - }, - { - name: "Put second element updated", - item: &selectors2Updated, - wantLength: 2, - wantKey: selectors2.Key, - wantValue: &selectors2Updated, - wantHeadKey: selectors2.Key, - }, - { - name: "Put third element again, overflow cache", - item: &selectors3Updated, - wantLength: 2, - wantKey: selectors3.Key, - wantValue: &selectors3Updated, - wantHeadKey: selectors3.Key, - }, - { - name: "Revert third entry", - item: &selectors3, - wantLength: 2, - wantKey: selectors3.Key, - wantValue: &selectors3, - wantHeadKey: selectors3.Key, - }, - { - name: "Pull second element to front", - item: &selectors2Updated, - wantLength: 2, - wantKey: selectors2.Key, - wantValue: &selectors2Updated, - wantHeadKey: selectors2.Key, - }, - { - name: "Put first element for the last time, overflow cache", - item: &selectors1, - wantLength: 2, - wantKey: selectors1.Key, - wantValue: &selectors1, - wantHeadKey: selectors1.Key, - }, - } - - getSteps2 := []struct { - name string - key string - item *Item - wantLength int - wantValue *Item - wantHeadKey string - }{ - { - name: "Get third element, should fail", - key: selectors3.Key, - item: nil, - wantLength: 2, - wantHeadKey: 
selectors1.Key, - }, - { - name: "Get third element again, should not change head", - key: selectors3.Key, - item: nil, - wantLength: 2, - wantHeadKey: selectors1.Key, - }, - { - name: "Get first element", - key: selectors1.Key, - item: &selectors1, - wantLength: 2, - wantHeadKey: selectors1.Key, - }, - { - name: "Get second element", - key: selectors2.Key, - item: &selectors2Updated, - wantLength: 2, - wantHeadKey: selectors2.Key, - }, - { - name: "Get third element again, should have new head from last get", - key: selectors3.Key, - item: nil, - wantLength: 2, - wantHeadKey: selectors2.Key, - }, - } - - for _, step := range putSteps1 { - cacheInstance.PutSignature(*step.item) - require.Contains(t, m, step.wantKey, "Key %q should be in the map after step %q", step.wantKey, step.name) - gotItem := m[step.wantKey].item - - require.Equal(t, gotItem, step.wantValue, "Value different than expected. \nGot: %v \nWant:%v", gotItem, step.wantValue) - require.Equal(t, items.Len(), step.wantLength, "Item count should be %v after step %q", step.wantLength, step.name) - require.Equal(t, items.Front().Value, step.wantHeadKey, "First element is %v should be %v after step %q", items.Front().Value, step.wantHeadKey, step.name) - } - for _, step := range getSteps1 { - gotItem := cacheInstance.GetSignature(step.key) - - require.Equal(t, gotItem, step.item, "Value different than expected. 
\nGot: %v \nWant:%v", gotItem, step.item) - require.Equal(t, items.Len(), step.wantLength, "Item count should be %v after step %q", step.wantLength, step.name) - require.Equal(t, items.Front().Value, step.wantHeadKey, "First element is %v should be %v after step %q", items.Front().Value, step.wantHeadKey, step.name) - } - for _, step := range putSteps2 { - cacheInstance.PutSignature(*step.item) - require.Contains(t, m, step.wantKey, "Key %q should be in the map after step %q", step.wantKey, step.name) - gotItem := m[step.wantKey].item - - require.Equal(t, gotItem, step.wantValue, "Value different than expected. \nGot: %v \nWant:%v", gotItem, step.wantValue) - require.Equal(t, items.Len(), step.wantLength, "Item count should be %v after step %q", step.wantLength, step.name) - require.Equal(t, items.Front().Value, step.wantHeadKey, "First element is %v should be %v after step %q", items.Front().Value, step.wantHeadKey, step.name) - } - for _, step := range getSteps2 { - gotItem := cacheInstance.GetSignature(step.key) - - require.Equal(t, gotItem, step.item, "Value different than expected. 
\nGot: %v \nWant:%v", gotItem, step.item) - require.Equal(t, items.Len(), step.wantLength, "Item count should be %v after step %q", step.wantLength, step.name) - require.Equal(t, items.Front().Value, step.wantHeadKey, "First element is %v should be %v after step %q", items.Front().Value, step.wantHeadKey, step.name) - } -} - -func TestCacheimpl_CheckOverflow(t *testing.T) { - mapNoOverflow, listNoOverflow := makeMapAndList(&selectors1, &selectors2, &selectors3) - mapOverflow, listOverflow := makeMapAndList(&selectors2, &selectors3) - mapReorder, listReorder := makeMapAndList(&selectors2, &selectors1) - - testCases := []struct { - name string - item *Item - wantLength int - wantedList *list.List - wantedMap map[string]MapItem - maxLength int - }{ - { - name: "Put third element, no overflow", - item: &selectors3, - wantedList: listNoOverflow, - wantedMap: mapNoOverflow, - maxLength: 3, - }, - { - name: "Put existing element no overflow", - item: &selectors1, - wantedList: listReorder, - wantedMap: mapReorder, - maxLength: 2, - }, - { - name: "Put third element, overflow", - item: &selectors3, - wantedList: listOverflow, - wantedMap: mapOverflow, - maxLength: 2, - }, - } - for _, testCase := range testCases { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { - m := make(map[string]MapItem) - items := list.New() - m[selectors1.Key] = MapItem{ - item: &selectors1, - element: items.PushFront(selectors1.Key), - } - m[selectors2.Key] = MapItem{ - item: &selectors2, - element: items.PushFront(selectors2.Key), - } - cacheInstance := &cacheImpl{ - size: testCase.maxLength, - items: items, - mutex: sync.RWMutex{}, - itemsMap: m, - } - cacheInstance.PutSignature(*testCase.item) - require.Equal(t, testCase.wantedList, cacheInstance.items, "List different than expected. \nGot: %v \nWant:%v", cacheInstance.items, testCase.wantedList) - require.Equal(t, testCase.wantedMap, cacheInstance.itemsMap, "Map different than expected. 
\nGot: %v \nWant:%v", cacheInstance.itemsMap, testCase.wantedMap) - }) - } -} - -func makeMapAndList(items ...*Item) (map[string]MapItem, *list.List) { - mp := make(map[string]MapItem) - ls := list.New() - for _, item := range items { - mp[item.Key] = MapItem{ - item: item, - element: ls.PushFront(item.Key), - } - } - return mp, ls -} diff --git a/pkg/agent/plugin/workloadattestor/unix/unix_posix.go b/pkg/agent/plugin/workloadattestor/unix/unix_posix.go index 83d22125769..90a131af7d3 100644 --- a/pkg/agent/plugin/workloadattestor/unix/unix_posix.go +++ b/pkg/agent/plugin/workloadattestor/unix/unix_posix.go @@ -176,12 +176,15 @@ func (p *Plugin) Attest(_ context.Context, req *workloadattestorv1.AttestRequest selectorValues = append(selectorValues, makeSelectorValue("path", processPath)) if config.WorkloadSizeLimit >= 0 { - exePath := p.getNamespacedPath(proc) - sha256Digest, err := util.GetSHA256Digest(exePath, config.WorkloadSizeLimit) + exePath, err := p.getNamespacedPath(proc) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } + sha256Digest, err := util.GetSHA256Digest(exePath, config.WorkloadSizeLimit) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } selectorValues = append(selectorValues, makeSelectorValue("sha256", sha256Digest)) } } @@ -273,8 +276,11 @@ func (p *Plugin) getPath(proc processInfo) (string, error) { return path, nil } -func (p *Plugin) getNamespacedPath(proc processInfo) string { - return proc.NamespacedExe() +func (p *Plugin) getNamespacedPath(proc processInfo) (string, error) { + if runtime.GOOS == "linux" { + return proc.NamespacedExe(), nil + } + return proc.Exe() } func makeSelectorValue(kind, value string) string { diff --git a/pkg/agent/plugin/workloadattestor/unix/unix_posix_test.go b/pkg/agent/plugin/workloadattestor/unix/unix_posix_test.go index d415507c76a..46847e29063 100644 --- a/pkg/agent/plugin/workloadattestor/unix/unix_posix_test.go +++ 
b/pkg/agent/plugin/workloadattestor/unix/unix_posix_test.go @@ -8,6 +8,7 @@ import ( "os" "os/user" "path/filepath" + "runtime" "strconv" "testing" @@ -45,6 +46,10 @@ func (s *Suite) SetupTest() { } func (s *Suite) TestAttest() { + unreadableExePath := "/proc/10/unreadable-exe" + if runtime.GOOS != "linux" { + unreadableExePath = filepath.Join(s.dir, "unreadable-exe") + } testCases := []struct { name string pid int @@ -131,7 +136,7 @@ func (s *Suite) TestAttest() { pid: 10, config: "discover_workload_path = true", expectCode: codes.Internal, - expectMsg: "workloadattestor(unix): SHA256 digest: open /proc/10/unreadable-exe: no such file or directory", + expectMsg: fmt.Sprintf("workloadattestor(unix): SHA256 digest: open %s: no such file or directory", unreadableExePath), }, { name: "process binary exceeds size limits", diff --git a/pkg/agent/svid/rotator.go b/pkg/agent/svid/rotator.go index fd532c44c89..434230d854d 100644 --- a/pkg/agent/svid/rotator.go +++ b/pkg/agent/svid/rotator.go @@ -14,8 +14,8 @@ import ( agentv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/agent/v1" node_attestor "github.com/spiffe/spire/pkg/agent/attestor/node" "github.com/spiffe/spire/pkg/agent/client" - "github.com/spiffe/spire/pkg/agent/common/backoff" "github.com/spiffe/spire/pkg/agent/plugin/keymanager" + "github.com/spiffe/spire/pkg/common/backoff" "github.com/spiffe/spire/pkg/common/nodeutil" "github.com/spiffe/spire/pkg/common/rotationutil" "github.com/spiffe/spire/pkg/common/telemetry" diff --git a/pkg/agent/svid/rotator_config.go b/pkg/agent/svid/rotator_config.go index 6eb4b0538d0..203c194ec05 100644 --- a/pkg/agent/svid/rotator_config.go +++ b/pkg/agent/svid/rotator_config.go @@ -11,10 +11,10 @@ import ( "github.com/sirupsen/logrus" "github.com/spiffe/go-spiffe/v2/spiffeid" "github.com/spiffe/spire/pkg/agent/client" - "github.com/spiffe/spire/pkg/agent/common/backoff" "github.com/spiffe/spire/pkg/agent/manager/cache" 
"github.com/spiffe/spire/pkg/agent/plugin/keymanager" "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor" + "github.com/spiffe/spire/pkg/common/backoff" "github.com/spiffe/spire/pkg/common/rotationutil" "github.com/spiffe/spire/pkg/common/telemetry" ) diff --git a/pkg/agent/common/backoff/backoff.go b/pkg/common/backoff/backoff.go similarity index 100% rename from pkg/agent/common/backoff/backoff.go rename to pkg/common/backoff/backoff.go diff --git a/pkg/agent/common/backoff/backoff_test.go b/pkg/common/backoff/backoff_test.go similarity index 100% rename from pkg/agent/common/backoff/backoff_test.go rename to pkg/common/backoff/backoff_test.go diff --git a/pkg/agent/common/backoff/size_backoff.go b/pkg/common/backoff/size_backoff.go similarity index 100% rename from pkg/agent/common/backoff/size_backoff.go rename to pkg/common/backoff/size_backoff.go diff --git a/pkg/agent/common/backoff/size_backoff_test.go b/pkg/common/backoff/size_backoff_test.go similarity index 100% rename from pkg/agent/common/backoff/size_backoff_test.go rename to pkg/common/backoff/size_backoff_test.go diff --git a/pkg/common/catalog/builtin.go b/pkg/common/catalog/builtin.go index 418d93c4f0c..ae246e81648 100644 --- a/pkg/common/catalog/builtin.go +++ b/pkg/common/catalog/builtin.go @@ -62,7 +62,7 @@ func loadBuiltIn(ctx context.Context, builtIn BuiltIn, config BuiltInConfig) (_ }() closers = append(closers, dialer) - builtinServer, serverCloser := newBuiltInServer() + builtinServer, serverCloser := newBuiltInServer(config.Log) closers = append(closers, serverCloser) pluginServers := append([]pluginsdk.ServiceServer{builtIn.Plugin}, builtIn.Services...) 
@@ -83,11 +83,11 @@ func loadBuiltIn(ctx context.Context, builtIn BuiltIn, config BuiltInConfig) (_ return newPlugin(ctx, builtinConn, info, config.Log, closers, config.HostServices) } -func newBuiltInServer() (*grpc.Server, io.Closer) { +func newBuiltInServer(log logrus.FieldLogger) (*grpc.Server, io.Closer) { drain := &drainHandlers{} return grpc.NewServer( - grpc.ChainStreamInterceptor(drain.StreamServerInterceptor, streamPanicInterceptor), - grpc.ChainUnaryInterceptor(drain.UnaryServerInterceptor, unaryPanicInterceptor), + grpc.ChainStreamInterceptor(drain.StreamServerInterceptor, streamPanicInterceptor(log)), + grpc.ChainUnaryInterceptor(drain.UnaryServerInterceptor, unaryPanicInterceptor(log)), ), closerFunc(drain.Wait) } @@ -102,7 +102,7 @@ func (d *builtinDialer) DialHost(context.Context) (grpc.ClientConnInterface, err if d.conn != nil { return d.conn, nil } - server := newHostServer(d.pluginName, d.hostServices) + server := newHostServer(d.log, d.pluginName, d.hostServices) conn, err := startPipeServer(server, d.log) if err != nil { return nil, err diff --git a/pkg/common/catalog/external.go b/pkg/common/catalog/external.go index 71788962765..1a65b19f53d 100644 --- a/pkg/common/catalog/external.go +++ b/pkg/common/catalog/external.go @@ -157,7 +157,7 @@ func (p *hcClientPlugin) GRPCClient(ctx context.Context, b *goplugin.GRPCBroker, return nil, errs.Wrap(err) } - server := newHostServer(p.config.Name, p.config.HostServices) + server := newHostServer(p.config.Log, p.config.Name, p.config.HostServices) var wg sync.WaitGroup wg.Add(1) diff --git a/pkg/common/catalog/host.go b/pkg/common/catalog/host.go index 49fd2254055..8fefc99046d 100644 --- a/pkg/common/catalog/host.go +++ b/pkg/common/catalog/host.go @@ -2,21 +2,24 @@ package catalog import ( "context" + "fmt" + "runtime/debug" + "github.com/sirupsen/logrus" "github.com/spiffe/spire-plugin-sdk/pluginsdk" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) -func 
newHostServer(pluginName string, hostServices []pluginsdk.ServiceServer) *grpc.Server { +func newHostServer(log logrus.FieldLogger, pluginName string, hostServices []pluginsdk.ServiceServer) *grpc.Server { s := grpc.NewServer( grpc.ChainStreamInterceptor( - streamPanicInterceptor, + streamPanicInterceptor(log), streamPluginInterceptor(pluginName), ), grpc.ChainUnaryInterceptor( - unaryPanicInterceptor, + unaryPanicInterceptor(log), unaryPluginInterceptor(pluginName), ), ) @@ -38,22 +41,34 @@ func unaryPluginInterceptor(name string) grpc.UnaryServerInterceptor { } } -func streamPanicInterceptor(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) (err error) { - defer func() { - if r := recover(); r != nil { - err = status.Errorf(codes.Internal, "%s", r) - } - }() - return handler(srv, ss) +func streamPanicInterceptor(log logrus.FieldLogger) grpc.StreamServerInterceptor { + return func(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) (err error) { + defer func() { + if r := recover(); r != nil { + err = convertPanic(log, r) + } + }() + return handler(srv, ss) + } +} + +func unaryPanicInterceptor(log logrus.FieldLogger) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (_ any, err error) { + defer func() { + if r := recover(); r != nil { + err = convertPanic(log, r) + } + }() + return handler(ctx, req) + } } -func unaryPanicInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (_ any, err error) { - defer func() { - if r := recover(); r != nil { - err = status.Errorf(codes.Internal, "%s", r) - } - }() - return handler(ctx, req) +func convertPanic(log logrus.FieldLogger, r any) error { + log.WithFields(logrus.Fields{ + "cause": fmt.Sprint(r), + "stack": string(debug.Stack()), + }).Error("Plugin panicked") + return status.Errorf(codes.Internal, "%s", r) } type streamWrapper struct { diff 
--git a/pkg/common/coretypes/bundle/bundle_test.go b/pkg/common/coretypes/bundle/bundle_test.go index ef3d1744e8b..d297393f5dd 100644 --- a/pkg/common/coretypes/bundle/bundle_test.go +++ b/pkg/common/coretypes/bundle/bundle_test.go @@ -32,12 +32,15 @@ MWnIPs59/JF8AiBeKSM/rkL2igQchDTvlJJWsyk9YL8UZI/XfZO7907TWA== pkixBytes, _ = x509.MarshalPKIXPublicKey(publicKey) apiJWTAuthoritiesGood = []*apitypes.JWTKey{ {KeyId: "ID", PublicKey: pkixBytes, ExpiresAt: expiresAt.Unix()}, + {KeyId: "IDTainted", PublicKey: pkixBytes, ExpiresAt: expiresAt.Unix(), Tainted: true}, } apiJWTAuthoritiesBad = []*apitypes.JWTKey{ {PublicKey: pkixBytes, ExpiresAt: expiresAt.Unix()}, + {PublicKey: pkixBytes, ExpiresAt: expiresAt.Unix()}, } apiX509AuthoritiesGood = []*apitypes.X509Certificate{ {Asn1: root.Raw}, + {Asn1: root.Raw, Tainted: true}, } apiX509AuthoritiesBad = []*apitypes.X509Certificate{ {Asn1: []byte("malformed")}, @@ -71,9 +74,12 @@ MWnIPs59/JF8AiBeKSM/rkL2igQchDTvlJJWsyk9YL8UZI/XfZO7907TWA== SequenceNumber: 2, } commonInvalidTD = &common.Bundle{ - TrustDomainId: "not a trustdomain id", - RootCas: []*common.Certificate{{DerBytes: root.Raw}}, - JwtSigningKeys: []*common.PublicKey{{Kid: "ID", PkixBytes: pkixBytes, NotAfter: expiresAt.Unix()}}, + TrustDomainId: "not a trustdomain id", + RootCas: []*common.Certificate{{DerBytes: root.Raw}}, + JwtSigningKeys: []*common.PublicKey{ + {Kid: "ID", PkixBytes: pkixBytes, NotAfter: expiresAt.Unix()}, + {Kid: "IDTainted", PkixBytes: pkixBytes, NotAfter: expiresAt.Unix(), TaintedKey: true}, + }, RefreshHint: 1, SequenceNumber: 2, } @@ -93,12 +99,14 @@ MWnIPs59/JF8AiBeKSM/rkL2igQchDTvlJJWsyk9YL8UZI/XfZO7907TWA== } pluginJWTAuthoritiesGood = []*plugintypes.JWTKey{ {KeyId: "ID", PublicKey: pkixBytes, ExpiresAt: expiresAt.Unix()}, + {KeyId: "IDTainted", PublicKey: pkixBytes, ExpiresAt: expiresAt.Unix(), Tainted: true}, } pluginJWTAuthoritiesBad = []*plugintypes.JWTKey{ {PublicKey: pkixBytes, ExpiresAt: expiresAt.Unix()}, } pluginX509AuthoritiesGood 
= []*plugintypes.X509Certificate{ {Asn1: root.Raw}, + {Asn1: root.Raw, Tainted: true}, } pluginX509AuthoritiesBad = []*plugintypes.X509Certificate{ {Asn1: []byte("malformed")}, @@ -132,9 +140,15 @@ MWnIPs59/JF8AiBeKSM/rkL2igQchDTvlJJWsyk9YL8UZI/XfZO7907TWA== SequenceNumber: 2, } commonGood = &common.Bundle{ - TrustDomainId: "spiffe://example.org", - RootCas: []*common.Certificate{{DerBytes: root.Raw}}, - JwtSigningKeys: []*common.PublicKey{{Kid: "ID", PkixBytes: pkixBytes, NotAfter: expiresAt.Unix()}}, + TrustDomainId: "spiffe://example.org", + RootCas: []*common.Certificate{ + {DerBytes: root.Raw}, + {DerBytes: root.Raw, TaintedKey: true}, + }, + JwtSigningKeys: []*common.PublicKey{ + {Kid: "ID", PkixBytes: pkixBytes, NotAfter: expiresAt.Unix()}, + {Kid: "IDTainted", PkixBytes: pkixBytes, NotAfter: expiresAt.Unix(), TaintedKey: true}, + }, RefreshHint: 1, SequenceNumber: 2, } diff --git a/pkg/common/coretypes/jwtkey/apitypes.go b/pkg/common/coretypes/jwtkey/apitypes.go index 49eaf42c3a6..cca9f6e7bd9 100644 --- a/pkg/common/coretypes/jwtkey/apitypes.go +++ b/pkg/common/coretypes/jwtkey/apitypes.go @@ -6,7 +6,7 @@ import ( ) func ToAPIProto(jwtKey JWTKey) (*apitypes.JWTKey, error) { - id, publicKey, expiresAt, err := toProtoFields(jwtKey) + id, publicKey, expiresAt, tainted, err := toProtoFields(jwtKey) if err != nil { return nil, err } @@ -15,6 +15,7 @@ func ToAPIProto(jwtKey JWTKey) (*apitypes.JWTKey, error) { KeyId: id, PublicKey: publicKey, ExpiresAt: expiresAt, + Tainted: tainted, }, nil } @@ -23,7 +24,7 @@ func ToAPIFromPluginProto(pb *plugintypes.JWTKey) (*apitypes.JWTKey, error) { return nil, nil } - jwtKey, err := fromProtoFields(pb.KeyId, pb.PublicKey, pb.ExpiresAt) + jwtKey, err := fromProtoFields(pb.KeyId, pb.PublicKey, pb.ExpiresAt, pb.Tainted) if err != nil { return nil, err } diff --git a/pkg/common/coretypes/jwtkey/commontypes.go b/pkg/common/coretypes/jwtkey/commontypes.go index dc5ba1ea6f0..3b8b0e883c1 100644 --- 
a/pkg/common/coretypes/jwtkey/commontypes.go +++ b/pkg/common/coretypes/jwtkey/commontypes.go @@ -6,7 +6,7 @@ import ( ) func FromCommonProto(pb *common.PublicKey) (JWTKey, error) { - return fromProtoFields(pb.Kid, pb.PkixBytes, pb.NotAfter) + return fromProtoFields(pb.Kid, pb.PkixBytes, pb.NotAfter, pb.TaintedKey) } func FromCommonProtos(pbs []*common.PublicKey) ([]JWTKey, error) { @@ -25,14 +25,15 @@ func FromCommonProtos(pbs []*common.PublicKey) ([]JWTKey, error) { } func ToCommonProto(jwtKey JWTKey) (*common.PublicKey, error) { - id, publicKey, expiresAt, err := toProtoFields(jwtKey) + id, publicKey, expiresAt, tainted, err := toProtoFields(jwtKey) if err != nil { return nil, err } return &common.PublicKey{ - Kid: id, - PkixBytes: publicKey, - NotAfter: expiresAt, + Kid: id, + PkixBytes: publicKey, + NotAfter: expiresAt, + TaintedKey: tainted, }, nil } diff --git a/pkg/common/coretypes/jwtkey/jwtkey.go b/pkg/common/coretypes/jwtkey/jwtkey.go index f7a52006fb3..fb960d0573f 100644 --- a/pkg/common/coretypes/jwtkey/jwtkey.go +++ b/pkg/common/coretypes/jwtkey/jwtkey.go @@ -12,30 +12,30 @@ type JWTKey struct { ID string PublicKey crypto.PublicKey ExpiresAt time.Time + Tainted bool } -func toProtoFields(jwtKey JWTKey) (string, []byte, int64, error) { +func toProtoFields(jwtKey JWTKey) (id string, publicKey []byte, expiresAt int64, tainted bool, err error) { if jwtKey.ID == "" { - return "", nil, 0, errors.New("missing key ID for JWT key") + return "", nil, 0, false, errors.New("missing key ID for JWT key") } if jwtKey.PublicKey == nil { - return "", nil, 0, fmt.Errorf("missing public key for JWT key %q", jwtKey.ID) + return "", nil, 0, false, fmt.Errorf("missing public key for JWT key %q", jwtKey.ID) } - publicKey, err := x509.MarshalPKIXPublicKey(jwtKey.PublicKey) + publicKey, err = x509.MarshalPKIXPublicKey(jwtKey.PublicKey) if err != nil { - return "", nil, 0, fmt.Errorf("failed to marshal public key for JWT key %q: %w", jwtKey.ID, err) + return "", nil, 0, false, 
fmt.Errorf("failed to marshal public key for JWT key %q: %w", jwtKey.ID, err) } - var expiresAt int64 if !jwtKey.ExpiresAt.IsZero() { expiresAt = jwtKey.ExpiresAt.Unix() } - return jwtKey.ID, publicKey, expiresAt, nil + return jwtKey.ID, publicKey, expiresAt, jwtKey.Tainted, nil } -func fromProtoFields(keyID string, publicKeyPKIX []byte, expiresAtUnix int64) (JWTKey, error) { +func fromProtoFields(keyID string, publicKeyPKIX []byte, expiresAtUnix int64, tainted bool) (JWTKey, error) { if keyID == "" { return JWTKey{}, errors.New("missing key ID for JWT key") } @@ -57,5 +57,6 @@ func fromProtoFields(keyID string, publicKeyPKIX []byte, expiresAtUnix int64) (J ID: keyID, PublicKey: publicKey, ExpiresAt: expiresAt, + Tainted: tainted, }, nil } diff --git a/pkg/common/coretypes/jwtkey/jwtkey_test.go b/pkg/common/coretypes/jwtkey/jwtkey_test.go index 5910a57898b..6260a00e692 100644 --- a/pkg/common/coretypes/jwtkey/jwtkey_test.go +++ b/pkg/common/coretypes/jwtkey/jwtkey_test.go @@ -22,21 +22,25 @@ var ( pkixBytes, _ = x509.MarshalPKIXPublicKey(publicKey) junk = []byte("JUNK") jwtKeyGood = jwtkey.JWTKey{ID: "ID", PublicKey: publicKey, ExpiresAt: expiresAt} + jwtKeyTaintedGood = jwtkey.JWTKey{ID: "ID", PublicKey: publicKey, ExpiresAt: expiresAt, Tainted: true} jwtKeyNoKeyID = jwtkey.JWTKey{PublicKey: publicKey, ExpiresAt: expiresAt} jwtKeyNoPublicKey = jwtkey.JWTKey{ID: "ID", ExpiresAt: expiresAt} jwtKeyBadPublicKey = jwtkey.JWTKey{ID: "ID", PublicKey: junk, ExpiresAt: expiresAt} jwtKeyNoExpiresAt = jwtkey.JWTKey{ID: "ID", PublicKey: publicKey} pluginGood = &plugintypes.JWTKey{KeyId: "ID", PublicKey: pkixBytes, ExpiresAt: expiresAt.Unix()} + pluginTaintedGood = &plugintypes.JWTKey{KeyId: "ID", PublicKey: pkixBytes, ExpiresAt: expiresAt.Unix(), Tainted: true} pluginNoKeyID = &plugintypes.JWTKey{PublicKey: pkixBytes, ExpiresAt: expiresAt.Unix()} pluginNoPublicKey = &plugintypes.JWTKey{KeyId: "ID", ExpiresAt: expiresAt.Unix()} pluginBadPublicKey = &plugintypes.JWTKey{KeyId: 
"ID", PublicKey: junk, ExpiresAt: expiresAt.Unix()} pluginNoExpiresAt = &plugintypes.JWTKey{KeyId: "ID", PublicKey: pkixBytes} commonGood = &common.PublicKey{Kid: "ID", PkixBytes: pkixBytes, NotAfter: expiresAt.Unix()} + commonTaintedGood = &common.PublicKey{Kid: "ID", PkixBytes: pkixBytes, NotAfter: expiresAt.Unix(), TaintedKey: true} commonNoKeyID = &common.PublicKey{PkixBytes: pkixBytes, NotAfter: expiresAt.Unix()} commonNoPublicKey = &common.PublicKey{Kid: "ID", NotAfter: expiresAt.Unix()} commonBadPublicKey = &common.PublicKey{Kid: "ID", PkixBytes: junk, NotAfter: expiresAt.Unix()} commonNoExpiresAt = &common.PublicKey{Kid: "ID", PkixBytes: pkixBytes} apiGood = &apitypes.JWTKey{KeyId: "ID", PublicKey: pkixBytes, ExpiresAt: expiresAt.Unix()} + apiTaintedGood = &apitypes.JWTKey{KeyId: "ID", PublicKey: pkixBytes, ExpiresAt: expiresAt.Unix(), Tainted: true} apiNoKeyID = &apitypes.JWTKey{PublicKey: pkixBytes, ExpiresAt: expiresAt.Unix()} apiNoPublicKey = &apitypes.JWTKey{KeyId: "ID", ExpiresAt: expiresAt.Unix()} apiBadPublicKey = &apitypes.JWTKey{KeyId: "ID", PublicKey: junk, ExpiresAt: expiresAt.Unix()} @@ -59,6 +63,7 @@ func TestFromCommonProto(t *testing.T) { } assertOK(t, commonGood, jwtKeyGood) + assertOK(t, commonTaintedGood, jwtKeyTaintedGood) assertFail(t, commonNoKeyID, "missing key ID for JWT key") assertFail(t, commonNoPublicKey, `missing public key for JWT key "ID"`) assertFail(t, commonBadPublicKey, `failed to unmarshal public key for JWT key "ID": `) @@ -80,7 +85,8 @@ func TestFromCommonProtos(t *testing.T) { assert.Panics(t, func() { jwtkey.RequireFromCommonProtos(in) }) } - assertOK(t, []*common.PublicKey{commonGood}, []jwtkey.JWTKey{jwtKeyGood}) + assertOK(t, []*common.PublicKey{commonGood, commonTaintedGood}, + []jwtkey.JWTKey{jwtKeyGood, jwtKeyTaintedGood}) assertFail(t, []*common.PublicKey{commonNoKeyID}, "missing key ID for JWT key") assertOK(t, nil, nil) } @@ -101,6 +107,7 @@ func TestToCommonProto(t *testing.T) { } assertOK(t, jwtKeyGood, 
commonGood) + assertOK(t, jwtKeyTaintedGood, commonTaintedGood) assertFail(t, jwtKeyNoKeyID, "missing key ID for JWT key") assertFail(t, jwtKeyNoPublicKey, `missing public key for JWT key "ID"`) assertFail(t, jwtKeyBadPublicKey, `failed to marshal public key for JWT key "ID": `) @@ -122,7 +129,8 @@ func TestToCommonProtos(t *testing.T) { assert.Panics(t, func() { jwtkey.RequireToCommonProtos(in) }) } - assertOK(t, []jwtkey.JWTKey{jwtKeyGood}, []*common.PublicKey{commonGood}) + assertOK(t, []jwtkey.JWTKey{jwtKeyGood, jwtKeyTaintedGood}, + []*common.PublicKey{commonGood, commonTaintedGood}) assertFail(t, []jwtkey.JWTKey{jwtKeyNoKeyID}, "missing key ID for JWT key") assertOK(t, nil, nil) } @@ -143,6 +151,7 @@ func TestFromPluginProto(t *testing.T) { } assertOK(t, pluginGood, jwtKeyGood) + assertOK(t, pluginTaintedGood, jwtKeyTaintedGood) assertFail(t, pluginNoKeyID, "missing key ID for JWT key") assertFail(t, pluginNoPublicKey, `missing public key for JWT key "ID"`) assertFail(t, pluginBadPublicKey, `failed to unmarshal public key for JWT key "ID": `) @@ -164,7 +173,8 @@ func TestFromPluginProtos(t *testing.T) { assert.Panics(t, func() { jwtkey.RequireFromPluginProtos(in) }) } - assertOK(t, []*plugintypes.JWTKey{pluginGood}, []jwtkey.JWTKey{jwtKeyGood}) + assertOK(t, []*plugintypes.JWTKey{pluginGood, pluginTaintedGood}, + []jwtkey.JWTKey{jwtKeyGood, jwtKeyTaintedGood}) assertFail(t, []*plugintypes.JWTKey{pluginNoKeyID}, "missing key ID for JWT key") assertOK(t, nil, nil) } @@ -185,6 +195,7 @@ func TestToPluginProto(t *testing.T) { } assertOK(t, jwtKeyGood, pluginGood) + assertOK(t, jwtKeyTaintedGood, pluginTaintedGood) assertFail(t, jwtKeyNoKeyID, "missing key ID for JWT key") assertFail(t, jwtKeyNoPublicKey, `missing public key for JWT key "ID"`) assertFail(t, jwtKeyBadPublicKey, `failed to marshal public key for JWT key "ID": `) @@ -206,7 +217,8 @@ func TestToPluginProtos(t *testing.T) { assert.Panics(t, func() { jwtkey.RequireToPluginProtos(in) }) } - assertOK(t, 
[]jwtkey.JWTKey{jwtKeyGood}, []*plugintypes.JWTKey{pluginGood}) + assertOK(t, []jwtkey.JWTKey{jwtKeyGood, jwtKeyTaintedGood}, + []*plugintypes.JWTKey{pluginGood, pluginTaintedGood}) assertFail(t, []jwtkey.JWTKey{jwtKeyNoKeyID}, "missing key ID for JWT key") assertOK(t, nil, nil) } @@ -227,6 +239,7 @@ func TestToCommonFromPluginProto(t *testing.T) { } assertOK(t, pluginGood, commonGood) + assertOK(t, pluginTaintedGood, commonTaintedGood) assertFail(t, pluginNoKeyID, "missing key ID for JWT key") assertFail(t, pluginNoPublicKey, `missing public key for JWT key "ID"`) assertFail(t, pluginBadPublicKey, `failed to unmarshal public key for JWT key "ID": `) @@ -248,7 +261,8 @@ func TestToCommonFromPluginProtos(t *testing.T) { assert.Panics(t, func() { jwtkey.RequireToCommonFromPluginProtos(in) }) } - assertOK(t, []*plugintypes.JWTKey{pluginGood}, []*common.PublicKey{commonGood}) + assertOK(t, []*plugintypes.JWTKey{pluginGood, pluginTaintedGood}, + []*common.PublicKey{commonGood, commonTaintedGood}) assertFail(t, []*plugintypes.JWTKey{pluginNoKeyID}, "missing key ID for JWT key") assertOK(t, nil, nil) } @@ -269,6 +283,7 @@ func TestToPluginFromCommonProto(t *testing.T) { } assertOK(t, commonGood, pluginGood) + assertOK(t, commonTaintedGood, pluginTaintedGood) assertFail(t, commonNoKeyID, "missing key ID for JWT key") assertFail(t, commonNoPublicKey, `missing public key for JWT key "ID"`) assertFail(t, commonBadPublicKey, `failed to unmarshal public key for JWT key "ID": `) @@ -290,7 +305,8 @@ func TestToPluginFromCommonProtos(t *testing.T) { assert.Panics(t, func() { jwtkey.RequireToPluginFromCommonProtos(in) }) } - assertOK(t, []*common.PublicKey{commonGood}, []*plugintypes.JWTKey{pluginGood}) + assertOK(t, []*common.PublicKey{commonGood, commonTaintedGood}, + []*plugintypes.JWTKey{pluginGood, pluginTaintedGood}) assertFail(t, []*common.PublicKey{commonNoKeyID}, "missing key ID for JWT key") assertOK(t, nil, nil) } @@ -308,6 +324,7 @@ func TestToPluginFromAPIProto(t 
*testing.T) { } assertOK(t, apiGood, pluginGood) + assertOK(t, apiTaintedGood, pluginTaintedGood) assertFail(t, apiNoKeyID, "missing key ID for JWT key") assertFail(t, apiNoPublicKey, `missing public key for JWT key "ID"`) assertFail(t, apiBadPublicKey, `failed to unmarshal public key for JWT key "ID": `) @@ -328,7 +345,8 @@ func TestToPluginFromAPIProtos(t *testing.T) { assert.Empty(t, actualOut) } - assertOK(t, []*apitypes.JWTKey{apiGood}, []*plugintypes.JWTKey{pluginGood}) + assertOK(t, []*apitypes.JWTKey{apiGood, apiTaintedGood}, + []*plugintypes.JWTKey{pluginGood, pluginTaintedGood}) assertFail(t, []*apitypes.JWTKey{apiNoKeyID}, "missing key ID for JWT key") assertOK(t, nil, nil) } @@ -347,6 +365,7 @@ func TestToAPIProto(t *testing.T) { } assertOK(t, jwtKeyGood, apiGood) + assertOK(t, jwtKeyTaintedGood, apiTaintedGood) assertFail(t, jwtKeyNoKeyID, "missing key ID for JWT key") assertFail(t, jwtKeyNoPublicKey, `missing public key for JWT key "ID"`) assertFail(t, jwtKeyBadPublicKey, `failed to marshal public key for JWT key "ID": `) @@ -367,6 +386,7 @@ func TestToAPIFromPluginProto(t *testing.T) { } assertOK(t, pluginGood, apiGood) + assertOK(t, pluginTaintedGood, apiTaintedGood) assertFail(t, pluginNoKeyID, "missing key ID for JWT key") assertFail(t, pluginNoPublicKey, `missing public key for JWT key "ID"`) assertFail(t, pluginBadPublicKey, `failed to unmarshal public key for JWT key "ID": `) diff --git a/pkg/common/coretypes/jwtkey/plugintypes.go b/pkg/common/coretypes/jwtkey/plugintypes.go index bd8677e8a6b..d3699844e75 100644 --- a/pkg/common/coretypes/jwtkey/plugintypes.go +++ b/pkg/common/coretypes/jwtkey/plugintypes.go @@ -7,7 +7,7 @@ import ( ) func FromPluginProto(pb *plugintypes.JWTKey) (JWTKey, error) { - return fromProtoFields(pb.KeyId, pb.PublicKey, pb.ExpiresAt) + return fromProtoFields(pb.KeyId, pb.PublicKey, pb.ExpiresAt, pb.Tainted) } func FromPluginProtos(pbs []*plugintypes.JWTKey) ([]JWTKey, error) { @@ -26,7 +26,7 @@ func FromPluginProtos(pbs 
[]*plugintypes.JWTKey) ([]JWTKey, error) { } func ToPluginProto(jwtKey JWTKey) (*plugintypes.JWTKey, error) { - id, publicKey, expiresAt, err := toProtoFields(jwtKey) + id, publicKey, expiresAt, tainted, err := toProtoFields(jwtKey) if err != nil { return nil, err } @@ -34,6 +34,7 @@ func ToPluginProto(jwtKey JWTKey) (*plugintypes.JWTKey, error) { KeyId: id, PublicKey: publicKey, ExpiresAt: expiresAt, + Tainted: tainted, }, nil } @@ -80,7 +81,7 @@ func ToPluginFromAPIProto(pb *apitypes.JWTKey) (*plugintypes.JWTKey, error) { return nil, nil } - jwtKey, err := fromProtoFields(pb.KeyId, pb.PublicKey, pb.ExpiresAt) + jwtKey, err := fromProtoFields(pb.KeyId, pb.PublicKey, pb.ExpiresAt, pb.Tainted) if err != nil { return nil, err } diff --git a/pkg/common/coretypes/x509certificate/commontypes.go b/pkg/common/coretypes/x509certificate/commontypes.go index 893434ed63e..8fbea697cbe 100644 --- a/pkg/common/coretypes/x509certificate/commontypes.go +++ b/pkg/common/coretypes/x509certificate/commontypes.go @@ -1,21 +1,19 @@ package x509certificate import ( - "crypto/x509" - plugintypes "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/types" "github.com/spiffe/spire/proto/spire/common" ) -func FromCommonProto(pb *common.Certificate) (*x509.Certificate, error) { - return fromProtoFields(pb.DerBytes) +func FromCommonProto(pb *common.Certificate) (*X509Authority, error) { + return fromProtoFields(pb.DerBytes, pb.TaintedKey) } -func FromCommonProtos(pbs []*common.Certificate) ([]*x509.Certificate, error) { +func FromCommonProtos(pbs []*common.Certificate) ([]*X509Authority, error) { if pbs == nil { return nil, nil } - x509Certificates := make([]*x509.Certificate, 0, len(pbs)) + x509Certificates := make([]*X509Authority, 0, len(pbs)) for _, pb := range pbs { x509Certificate, err := FromCommonProto(pb) if err != nil { @@ -26,23 +24,24 @@ func FromCommonProtos(pbs []*common.Certificate) ([]*x509.Certificate, error) { return x509Certificates, nil } -func 
ToCommonProto(x509Certificate *x509.Certificate) (*common.Certificate, error) { - asn1, err := toProtoFields(x509Certificate) +func ToCommonProto(x509Authority *X509Authority) (*common.Certificate, error) { + asn1, tainted, err := toProtoFields(x509Authority) if err != nil { return nil, err } return &common.Certificate{ - DerBytes: asn1, + DerBytes: asn1, + TaintedKey: tainted, }, nil } -func ToCommonProtos(x509Certificates []*x509.Certificate) ([]*common.Certificate, error) { - if x509Certificates == nil { +func ToCommonProtos(x509Authorities []*X509Authority) ([]*common.Certificate, error) { + if x509Authorities == nil { return nil, nil } - pbs := make([]*common.Certificate, 0, len(x509Certificates)) - for _, x509Certificate := range x509Certificates { - pb, err := ToCommonProto(x509Certificate) + pbs := make([]*common.Certificate, 0, len(x509Authorities)) + for _, x509Authority := range x509Authorities { + pb, err := ToCommonProto(x509Authority) if err != nil { return nil, err } @@ -58,47 +57,3 @@ func ToCommonFromPluginProtos(pbs []*plugintypes.X509Certificate) ([]*common.Cer } return ToCommonProtos(certs) } - -func RawFromCommonProto(pb *common.Certificate) ([]byte, error) { - return rawFromProtoFields(pb.DerBytes) -} - -func RawFromCommonProtos(pbs []*common.Certificate) ([][]byte, error) { - if pbs == nil { - return nil, nil - } - rawX509Certificates := make([][]byte, 0, len(pbs)) - for _, pb := range pbs { - rawX509Certificate, err := RawFromCommonProto(pb) - if err != nil { - return nil, err - } - rawX509Certificates = append(rawX509Certificates, rawX509Certificate) - } - return rawX509Certificates, nil -} - -func RawToCommonProto(rawX509Certificate []byte) (*common.Certificate, error) { - asn1, err := rawToProtoFields(rawX509Certificate) - if err != nil { - return nil, err - } - return &common.Certificate{ - DerBytes: asn1, - }, nil -} - -func RawToCommonProtos(rawX509Certificates [][]byte) ([]*common.Certificate, error) { - if rawX509Certificates == nil 
{ - return nil, nil - } - pbs := make([]*common.Certificate, 0, len(rawX509Certificates)) - for _, rawX509Certificate := range rawX509Certificates { - pb, err := RawToCommonProto(rawX509Certificate) - if err != nil { - return nil, err - } - pbs = append(pbs, pb) - } - return pbs, nil -} diff --git a/pkg/common/coretypes/x509certificate/plugintypes.go b/pkg/common/coretypes/x509certificate/plugintypes.go index 5b526ef5872..7f6eca60a32 100644 --- a/pkg/common/coretypes/x509certificate/plugintypes.go +++ b/pkg/common/coretypes/x509certificate/plugintypes.go @@ -8,15 +8,15 @@ import ( "github.com/spiffe/spire/proto/spire/common" ) -func FromPluginProto(pb *plugintypes.X509Certificate) (*x509.Certificate, error) { - return fromProtoFields(pb.Asn1) +func FromPluginProto(pb *plugintypes.X509Certificate) (*X509Authority, error) { + return fromProtoFields(pb.Asn1, pb.Tainted) } -func FromPluginProtos(pbs []*plugintypes.X509Certificate) ([]*x509.Certificate, error) { +func FromPluginProtos(pbs []*plugintypes.X509Certificate) ([]*X509Authority, error) { if pbs == nil { return nil, nil } - x509Certificates := make([]*x509.Certificate, 0, len(pbs)) + x509Certificates := make([]*X509Authority, 0, len(pbs)) for _, pb := range pbs { x509Certificate, err := FromPluginProto(pb) if err != nil { @@ -27,22 +27,23 @@ func FromPluginProtos(pbs []*plugintypes.X509Certificate) ([]*x509.Certificate, return x509Certificates, nil } -func ToPluginProto(x509Certificate *x509.Certificate) (*plugintypes.X509Certificate, error) { - asn1, err := toProtoFields(x509Certificate) +func ToPluginProto(x509Authority *X509Authority) (*plugintypes.X509Certificate, error) { + asn1, tainted, err := toProtoFields(x509Authority) if err != nil { return nil, err } return &plugintypes.X509Certificate{ - Asn1: asn1, + Asn1: asn1, + Tainted: tainted, }, nil } -func ToPluginProtos(x509Certificates []*x509.Certificate) ([]*plugintypes.X509Certificate, error) { - if x509Certificates == nil { +func 
ToPluginProtos(x509Authorities []*X509Authority) ([]*plugintypes.X509Certificate, error) { + if x509Authorities == nil { return nil, nil } - pbs := make([]*plugintypes.X509Certificate, 0, len(x509Certificates)) - for _, x509Certificate := range x509Certificates { + pbs := make([]*plugintypes.X509Certificate, 0, len(x509Authorities)) + for _, x509Certificate := range x509Authorities { pb, err := ToPluginProto(x509Certificate) if err != nil { return nil, err @@ -60,61 +61,45 @@ func ToPluginFromCommonProtos(pbs []*common.Certificate) ([]*plugintypes.X509Cer return ToPluginProtos(certs) } -func RawFromPluginProto(pb *plugintypes.X509Certificate) ([]byte, error) { - return rawFromProtoFields(pb.Asn1) -} - -func RawFromPluginProtos(pbs []*plugintypes.X509Certificate) ([][]byte, error) { - if pbs == nil { +func ToPluginFromCertificates(x509Certificates []*x509.Certificate) ([]*plugintypes.X509Certificate, error) { + if x509Certificates == nil { return nil, nil } - rawX509Certificates := make([][]byte, 0, len(pbs)) - for _, pb := range pbs { - rawX509Certificate, err := RawFromPluginProto(pb) + pbs := make([]*plugintypes.X509Certificate, 0, len(x509Certificates)) + for _, eachCert := range x509Certificates { + pb, err := ToPluginFromCertificate(eachCert) if err != nil { return nil, err } - rawX509Certificates = append(rawX509Certificates, rawX509Certificate) + pbs = append(pbs, pb) } - return rawX509Certificates, nil + + return pbs, nil } -func RawToPluginProto(rawX509Certificate []byte) (*plugintypes.X509Certificate, error) { - asn1, err := rawToProtoFields(rawX509Certificate) - if err != nil { +func ToPluginFromCertificate(x509Certificate *x509.Certificate) (*plugintypes.X509Certificate, error) { + if err := validateX509Certificate(x509Certificate); err != nil { return nil, err } + return &plugintypes.X509Certificate{ - Asn1: asn1, + Asn1: x509Certificate.Raw, + Tainted: false, }, nil } -func RawToPluginProtos(rawX509Certificates [][]byte) 
([]*plugintypes.X509Certificate, error) { - if rawX509Certificates == nil { - return nil, nil - } - pbs := make([]*plugintypes.X509Certificate, 0, len(rawX509Certificates)) - for _, rawX509Certificate := range rawX509Certificates { - pb, err := RawToPluginProto(rawX509Certificate) - if err != nil { - return nil, err - } - pbs = append(pbs, pb) - } - return pbs, nil -} - func ToPluginFromAPIProto(pb *apitypes.X509Certificate) (*plugintypes.X509Certificate, error) { if pb == nil { return nil, nil } - asn1, err := rawFromProtoFields(pb.Asn1) + x509Authority, err := fromProtoFields(pb.Asn1, pb.Tainted) if err != nil { return nil, err } return &plugintypes.X509Certificate{ - Asn1: asn1, + Asn1: x509Authority.Certificate.Raw, + Tainted: x509Authority.Tainted, }, nil } diff --git a/pkg/common/coretypes/x509certificate/require.go b/pkg/common/coretypes/x509certificate/require.go index 297eb666223..3fbfb404f18 100644 --- a/pkg/common/coretypes/x509certificate/require.go +++ b/pkg/common/coretypes/x509certificate/require.go @@ -7,25 +7,25 @@ import ( "github.com/spiffe/spire/proto/spire/common" ) -func RequireFromCommonProto(pb *common.Certificate) *x509.Certificate { +func RequireFromCommonProto(pb *common.Certificate) *X509Authority { out, err := FromCommonProto(pb) panicOnError(err) return out } -func RequireFromCommonProtos(pbs []*common.Certificate) []*x509.Certificate { +func RequireFromCommonProtos(pbs []*common.Certificate) []*X509Authority { out, err := FromCommonProtos(pbs) panicOnError(err) return out } -func RequireToCommonProto(x509Certificate *x509.Certificate) *common.Certificate { +func RequireToCommonProto(x509Certificate *X509Authority) *common.Certificate { out, err := ToCommonProto(x509Certificate) panicOnError(err) return out } -func RequireToCommonProtos(x509Certificates []*x509.Certificate) []*common.Certificate { +func RequireToCommonProtos(x509Certificates []*X509Authority) []*common.Certificate { out, err := ToCommonProtos(x509Certificates) 
panicOnError(err) return out @@ -37,25 +37,25 @@ func RequireToCommonFromPluginProtos(pbs []*plugintypes.X509Certificate) []*comm return out } -func RequireFromPluginProto(pb *plugintypes.X509Certificate) *x509.Certificate { +func RequireFromPluginProto(pb *plugintypes.X509Certificate) *X509Authority { out, err := FromPluginProto(pb) panicOnError(err) return out } -func RequireFromPluginProtos(pbs []*plugintypes.X509Certificate) []*x509.Certificate { +func RequireFromPluginProtos(pbs []*plugintypes.X509Certificate) []*X509Authority { out, err := FromPluginProtos(pbs) panicOnError(err) return out } -func RequireToPluginProto(x509Certificate *x509.Certificate) *plugintypes.X509Certificate { +func RequireToPluginProto(x509Certificate *X509Authority) *plugintypes.X509Certificate { out, err := ToPluginProto(x509Certificate) panicOnError(err) return out } -func RequireToPluginProtos(x509Certificates []*x509.Certificate) []*plugintypes.X509Certificate { +func RequireToPluginProtos(x509Certificates []*X509Authority) []*plugintypes.X509Certificate { out, err := ToPluginProtos(x509Certificates) panicOnError(err) return out @@ -67,52 +67,16 @@ func RequireToPluginFromCommonProtos(pbs []*common.Certificate) []*plugintypes.X return out } -func RequireRawFromCommonProto(pb *common.Certificate) []byte { - out, err := RawFromCommonProto(pb) +func RequireToPluginFromCertificates(x509Certificates []*x509.Certificate) []*plugintypes.X509Certificate { + pbs, err := ToPluginFromCertificates(x509Certificates) panicOnError(err) - return out -} - -func RequireRawFromCommonProtos(pbs []*common.Certificate) [][]byte { - out, err := RawFromCommonProtos(pbs) - panicOnError(err) - return out -} - -func RequireRawToCommonProto(rawX509Certificate []byte) *common.Certificate { - out, err := RawToCommonProto(rawX509Certificate) - panicOnError(err) - return out -} - -func RequireRawToCommonProtos(rawX509Certificates [][]byte) []*common.Certificate { - out, err := 
RawToCommonProtos(rawX509Certificates) - panicOnError(err) - return out + return pbs } -func RequireRawFromPluginProto(pb *plugintypes.X509Certificate) []byte { - out, err := RawFromPluginProto(pb) +func RequireToPluginFromCertificate(x509Certificate *x509.Certificate) *plugintypes.X509Certificate { + pb, err := ToPluginFromCertificate(x509Certificate) panicOnError(err) - return out -} - -func RequireRawFromPluginProtos(pbs []*plugintypes.X509Certificate) [][]byte { - out, err := RawFromPluginProtos(pbs) - panicOnError(err) - return out -} - -func RequireRawToPluginProto(rawX509Certificate []byte) *plugintypes.X509Certificate { - out, err := RawToPluginProto(rawX509Certificate) - panicOnError(err) - return out -} - -func RequireRawToPluginProtos(rawX509Certificates [][]byte) []*plugintypes.X509Certificate { - out, err := RawToPluginProtos(rawX509Certificates) - panicOnError(err) - return out + return pb } func panicOnError(err error) { diff --git a/pkg/common/coretypes/x509certificate/x509certificate.go b/pkg/common/coretypes/x509certificate/x509certificate.go index 4bd2214cf08..604c9de447f 100644 --- a/pkg/common/coretypes/x509certificate/x509certificate.go +++ b/pkg/common/coretypes/x509certificate/x509certificate.go @@ -6,7 +6,14 @@ import ( "fmt" ) -func fromProtoFields(asn1 []byte) (*x509.Certificate, error) { +// TODO: may we call it Authority? +// TODO: may we add subjectKeyID? 
+type X509Authority struct { + Certificate *x509.Certificate + Tainted bool +} + +func fromProtoFields(asn1 []byte, tainted bool) (*X509Authority, error) { if len(asn1) == 0 { return nil, errors.New("missing X.509 certificate data") } @@ -14,24 +21,30 @@ func fromProtoFields(asn1 []byte) (*x509.Certificate, error) { if err != nil { return nil, fmt.Errorf("failed to parse X.509 certificate data: %w", err) } - return x509Certificate, nil + return &X509Authority{ + Certificate: x509Certificate, + Tainted: tainted, + }, nil } -func rawFromProtoFields(asn1 []byte) ([]byte, error) { - cert, err := fromProtoFields(asn1) - if err != nil { - return nil, err +func toProtoFields(x509Authority *X509Authority) ([]byte, bool, error) { + if x509Authority == nil { + return nil, false, errors.New("missing x509 authority") + } + if err := validateX509Certificate(x509Authority.Certificate); err != nil { + return nil, false, err } - return cert.Raw, nil -} -func toProtoFields(x509Certificate *x509.Certificate) ([]byte, error) { - return rawToProtoFields(x509Certificate.Raw) + return x509Authority.Certificate.Raw, x509Authority.Tainted, nil } -func rawToProtoFields(asn1 []byte) ([]byte, error) { - if len(asn1) == 0 { - return nil, errors.New("missing X.509 certificate data") +func validateX509Certificate(cert *x509.Certificate) error { + switch { + case cert == nil: + return errors.New("missing X.509 certificate") + case len(cert.Raw) == 0: + return errors.New("missing X.509 certificate data") + default: + return nil } - return asn1, nil } diff --git a/pkg/common/coretypes/x509certificate/x509certificate_test.go b/pkg/common/coretypes/x509certificate/x509certificate_test.go index ba2b431ec63..d46f2b96f47 100644 --- a/pkg/common/coretypes/x509certificate/x509certificate_test.go +++ b/pkg/common/coretypes/x509certificate/x509certificate_test.go @@ -34,24 +34,32 @@ DgYDVR0PAQH/BAQDAgeAMB8GA1UdIwQYMBaAFAXjxsTxL8UIBZl5lheqqaDOcBhN 
MAoGCCqGSM49BAMCA0cAMEQCIHf/4m7fPB238z+aPaCuMj019SgA9o3ocdj0yvTx ozrYAiBrdSwMwUG795ZY1D5lh5s0mHb98muSjR3EoPPSiadJtA== -----END CERTIFICATE-----`) - root, _ = pemutil.ParseCertificate(rootPEM) - leaf, _ = pemutil.ParseCertificate(leafPEM) - empty = &x509.Certificate{} - junk = []byte("JUNK") - pluginRoot = &plugintypes.X509Certificate{Asn1: root.Raw} - pluginLeaf = &plugintypes.X509Certificate{Asn1: leaf.Raw} - pluginEmpty = &plugintypes.X509Certificate{} - pluginBad = &plugintypes.X509Certificate{Asn1: junk} - commonRoot = &common.Certificate{DerBytes: root.Raw} - commonLeaf = &common.Certificate{DerBytes: leaf.Raw} - commonEmpty = &common.Certificate{} - commonBad = &common.Certificate{DerBytes: junk} - apiLeaf = &apitypes.X509Certificate{Asn1: leaf.Raw} - apiEmpty = &apitypes.X509Certificate{} + root, _ = pemutil.ParseCertificate(rootPEM) + leaf, _ = pemutil.ParseCertificate(leafPEM) + rootAuthority = &x509certificate.X509Authority{Certificate: root} + leafAuthority = &x509certificate.X509Authority{Certificate: leaf} + leafTaintedAuthority = &x509certificate.X509Authority{Certificate: leaf, Tainted: true} + empty = &x509.Certificate{} + emptyAuthority = &x509certificate.X509Authority{} + emptyAuthorityCert = &x509certificate.X509Authority{Certificate: &x509.Certificate{}} + junk = []byte("JUNK") + pluginRoot = &plugintypes.X509Certificate{Asn1: root.Raw} + pluginLeaf = &plugintypes.X509Certificate{Asn1: leaf.Raw} + pluginTaintedLeaf = &plugintypes.X509Certificate{Asn1: leaf.Raw, Tainted: true} + pluginEmpty = &plugintypes.X509Certificate{} + pluginBad = &plugintypes.X509Certificate{Asn1: junk} + commonRoot = &common.Certificate{DerBytes: root.Raw} + commonLeaf = &common.Certificate{DerBytes: leaf.Raw} + commonTaintedLeaf = &common.Certificate{DerBytes: leaf.Raw, TaintedKey: true} + commonEmpty = &common.Certificate{} + commonBad = &common.Certificate{DerBytes: junk} + apiLeaf = &apitypes.X509Certificate{Asn1: leaf.Raw} + apiTaintedLeaf = 
&apitypes.X509Certificate{Asn1: leaf.Raw, Tainted: true} + apiEmpty = &apitypes.X509Certificate{} ) func TestFromCommonProto(t *testing.T) { - assertOK := func(t *testing.T, in *common.Certificate, expectOut *x509.Certificate) { + assertOK := func(t *testing.T, in *common.Certificate, expectOut *x509certificate.X509Authority) { actualOut, err := x509certificate.FromCommonProto(in) require.NoError(t, err) assertX509CertificateEqual(t, expectOut, actualOut) @@ -65,13 +73,14 @@ func TestFromCommonProto(t *testing.T) { assert.Panics(t, func() { x509certificate.RequireFromCommonProto(in) }) } - assertOK(t, commonLeaf, leaf) + assertOK(t, commonLeaf, leafAuthority) + assertOK(t, commonTaintedLeaf, leafTaintedAuthority) assertFail(t, commonEmpty, "missing X.509 certificate data") assertFail(t, commonBad, "failed to parse X.509 certificate data: ") } func TestFromCommonProtos(t *testing.T) { - assertOK := func(t *testing.T, in []*common.Certificate, expectOut []*x509.Certificate) { + assertOK := func(t *testing.T, in []*common.Certificate, expectOut []*x509certificate.X509Authority) { actualOut, err := x509certificate.FromCommonProtos(in) require.NoError(t, err) assertX509CertificatesEqual(t, expectOut, actualOut) @@ -85,47 +94,52 @@ func TestFromCommonProtos(t *testing.T) { assert.Panics(t, func() { x509certificate.RequireFromCommonProtos(in) }) } - assertOK(t, []*common.Certificate{commonLeaf, commonRoot}, []*x509.Certificate{leaf, root}) + assertOK(t, []*common.Certificate{commonLeaf, commonRoot}, []*x509certificate.X509Authority{leafAuthority, rootAuthority}) + assertOK(t, []*common.Certificate{commonTaintedLeaf, commonRoot}, []*x509certificate.X509Authority{leafTaintedAuthority, rootAuthority}) assertFail(t, []*common.Certificate{commonEmpty}, "missing X.509 certificate data") assertOK(t, nil, nil) } func TestToCommonProto(t *testing.T) { - assertOK := func(t *testing.T, in *x509.Certificate, expectOut *common.Certificate) { + assertOK := func(t *testing.T, in 
*x509certificate.X509Authority, expectOut *common.Certificate) { actualOut, err := x509certificate.ToCommonProto(in) require.NoError(t, err) spiretest.AssertProtoEqual(t, expectOut, actualOut) assert.NotPanics(t, func() { spiretest.AssertProtoEqual(t, expectOut, x509certificate.RequireToCommonProto(in)) }) } - assertFail := func(t *testing.T, in *x509.Certificate, expectErr string) { + assertFail := func(t *testing.T, in *x509certificate.X509Authority, expectErr string) { actualOut, err := x509certificate.ToCommonProto(in) spiretest.RequireErrorPrefix(t, err, expectErr) assert.Empty(t, actualOut) assert.Panics(t, func() { x509certificate.RequireToCommonProto(in) }) } - assertOK(t, leaf, commonLeaf) - assertFail(t, empty, "missing X.509 certificate data") + assertOK(t, leafAuthority, commonLeaf) + assertFail(t, nil, "missing x509 authority") + assertFail(t, emptyAuthority, "missing X.509 certificate") + assertFail(t, emptyAuthorityCert, "missing X.509 certificate data") } func TestToCommonProtos(t *testing.T) { - assertOK := func(t *testing.T, in []*x509.Certificate, expectOut []*common.Certificate) { + assertOK := func(t *testing.T, in []*x509certificate.X509Authority, expectOut []*common.Certificate) { actualOut, err := x509certificate.ToCommonProtos(in) require.NoError(t, err) spiretest.AssertProtoListEqual(t, expectOut, actualOut) assert.NotPanics(t, func() { spiretest.AssertProtoListEqual(t, expectOut, x509certificate.RequireToCommonProtos(in)) }) } - assertFail := func(t *testing.T, in []*x509.Certificate, expectErr string) { + assertFail := func(t *testing.T, in []*x509certificate.X509Authority, expectErr string) { actualOut, err := x509certificate.ToCommonProtos(in) spiretest.RequireErrorPrefix(t, err, expectErr) assert.Empty(t, actualOut) assert.Panics(t, func() { x509certificate.RequireToCommonProtos(in) }) } - assertOK(t, []*x509.Certificate{leaf}, []*common.Certificate{commonLeaf}) - assertFail(t, []*x509.Certificate{empty}, "missing X.509 certificate 
data") + assertOK(t, []*x509certificate.X509Authority{leafAuthority}, []*common.Certificate{commonLeaf}) + assertOK(t, []*x509certificate.X509Authority{leafTaintedAuthority}, []*common.Certificate{commonTaintedLeaf}) + assertFail(t, []*x509certificate.X509Authority{emptyAuthority}, "missing X.509 certificate") + assertFail(t, []*x509certificate.X509Authority{emptyAuthorityCert}, "missing X.509 certificate data") assertOK(t, nil, nil) } @@ -146,13 +160,14 @@ func TestToCommonFromPluginProtos(t *testing.T) { assert.Panics(t, func() { x509certificate.RequireToCommonFromPluginProtos(in) }) } - assertOK(t, []*plugintypes.X509Certificate{pluginLeaf}, []*common.Certificate{commonLeaf}) + assertOK(t, []*plugintypes.X509Certificate{pluginLeaf, pluginTaintedLeaf}, + []*common.Certificate{commonLeaf, commonTaintedLeaf}) assertFail(t, []*plugintypes.X509Certificate{pluginEmpty}, "missing X.509 certificate data") assertOK(t, nil, nil) } func TestFromPluginProto(t *testing.T) { - assertOK := func(t *testing.T, in *plugintypes.X509Certificate, expectOut *x509.Certificate) { + assertOK := func(t *testing.T, in *plugintypes.X509Certificate, expectOut *x509certificate.X509Authority) { actualOut, err := x509certificate.FromPluginProto(in) require.NoError(t, err) assertX509CertificateEqual(t, expectOut, actualOut) @@ -166,13 +181,13 @@ func TestFromPluginProto(t *testing.T) { assert.Panics(t, func() { x509certificate.RequireFromPluginProto(in) }) } - assertOK(t, pluginLeaf, leaf) + assertOK(t, pluginLeaf, leafAuthority) assertFail(t, pluginEmpty, "missing X.509 certificate data") assertFail(t, pluginBad, "failed to parse X.509 certificate data: ") } func TestFromPluginProtos(t *testing.T) { - assertOK := func(t *testing.T, in []*plugintypes.X509Certificate, expectOut []*x509.Certificate) { + assertOK := func(t *testing.T, in []*plugintypes.X509Certificate, expectOut []*x509certificate.X509Authority) { actualOut, err := x509certificate.FromPluginProtos(in) require.NoError(t, err) 
assertX509CertificatesEqual(t, expectOut, actualOut) @@ -186,47 +201,50 @@ func TestFromPluginProtos(t *testing.T) { assert.Panics(t, func() { x509certificate.RequireFromPluginProtos(in) }) } - assertOK(t, []*plugintypes.X509Certificate{pluginLeaf, pluginRoot}, []*x509.Certificate{leaf, root}) + assertOK(t, []*plugintypes.X509Certificate{pluginLeaf, pluginRoot}, []*x509certificate.X509Authority{leafAuthority, rootAuthority}) assertFail(t, []*plugintypes.X509Certificate{pluginEmpty}, "missing X.509 certificate data") assertOK(t, nil, nil) } func TestToPluginProto(t *testing.T) { - assertOK := func(t *testing.T, in *x509.Certificate, expectOut *plugintypes.X509Certificate) { + assertOK := func(t *testing.T, in *x509certificate.X509Authority, expectOut *plugintypes.X509Certificate) { actualOut, err := x509certificate.ToPluginProto(in) require.NoError(t, err) spiretest.AssertProtoEqual(t, expectOut, actualOut) assert.NotPanics(t, func() { spiretest.AssertProtoEqual(t, expectOut, x509certificate.RequireToPluginProto(in)) }) } - assertFail := func(t *testing.T, in *x509.Certificate, expectErr string) { + assertFail := func(t *testing.T, in *x509certificate.X509Authority, expectErr string) { actualOut, err := x509certificate.ToPluginProto(in) spiretest.RequireErrorPrefix(t, err, expectErr) assert.Empty(t, actualOut) assert.Panics(t, func() { x509certificate.RequireToPluginProto(in) }) } - assertOK(t, leaf, pluginLeaf) - assertFail(t, empty, "missing X.509 certificate data") + assertOK(t, leafAuthority, pluginLeaf) + assertFail(t, nil, "missing x509 authority") + assertFail(t, emptyAuthority, "missing X.509 certificate") + assertFail(t, emptyAuthorityCert, "missing X.509 certificate data") } func TestToPluginProtos(t *testing.T) { - assertOK := func(t *testing.T, in []*x509.Certificate, expectOut []*plugintypes.X509Certificate) { + assertOK := func(t *testing.T, in []*x509certificate.X509Authority, expectOut []*plugintypes.X509Certificate) { actualOut, err := 
x509certificate.ToPluginProtos(in) require.NoError(t, err) spiretest.AssertProtoListEqual(t, expectOut, actualOut) assert.NotPanics(t, func() { spiretest.AssertProtoListEqual(t, expectOut, x509certificate.RequireToPluginProtos(in)) }) } - assertFail := func(t *testing.T, in []*x509.Certificate, expectErr string) { + assertFail := func(t *testing.T, in []*x509certificate.X509Authority, expectErr string) { actualOut, err := x509certificate.ToPluginProtos(in) spiretest.RequireErrorPrefix(t, err, expectErr) assert.Empty(t, actualOut) assert.Panics(t, func() { x509certificate.RequireToPluginProtos(in) }) } - assertOK(t, []*x509.Certificate{leaf}, []*plugintypes.X509Certificate{pluginLeaf}) - assertFail(t, []*x509.Certificate{empty}, "missing X.509 certificate data") + assertOK(t, []*x509certificate.X509Authority{leafAuthority}, []*plugintypes.X509Certificate{pluginLeaf}) + assertFail(t, []*x509certificate.X509Authority{emptyAuthority}, "missing X.509 certificate") + assertFail(t, []*x509certificate.X509Authority{emptyAuthorityCert}, "missing X.509 certificate data") assertOK(t, nil, nil) } @@ -247,209 +265,89 @@ func TestToPluginFromCommonProtos(t *testing.T) { assert.Panics(t, func() { x509certificate.RequireToPluginFromCommonProtos(in) }) } - assertOK(t, []*common.Certificate{commonLeaf}, []*plugintypes.X509Certificate{pluginLeaf}) + assertOK(t, []*common.Certificate{commonLeaf, commonTaintedLeaf}, []*plugintypes.X509Certificate{pluginLeaf, pluginTaintedLeaf}) assertFail(t, []*common.Certificate{commonEmpty}, "missing X.509 certificate data") assertOK(t, nil, nil) } -func TestRawFromCommonProto(t *testing.T) { - assertOK := func(t *testing.T, in *common.Certificate, expectOut []byte) { - actualOut, err := x509certificate.RawFromCommonProto(in) - require.NoError(t, err) - assert.Equal(t, expectOut, actualOut) - assert.NotPanics(t, func() { assert.Equal(t, expectOut, x509certificate.RequireRawFromCommonProto(in)) }) - } - - assertFail := func(t *testing.T, in 
*common.Certificate, expectErr string) { - actualOut, err := x509certificate.RawFromCommonProto(in) - spiretest.RequireErrorPrefix(t, err, expectErr) - assert.Empty(t, actualOut) - assert.Panics(t, func() { x509certificate.RequireRawFromCommonProto(in) }) - } - - assertOK(t, commonLeaf, leaf.Raw) - assertFail(t, commonEmpty, "missing X.509 certificate data") - assertFail(t, commonBad, "failed to parse X.509 certificate data: ") -} - -func TestRawFromCommonProtos(t *testing.T) { - assertOK := func(t *testing.T, in []*common.Certificate, expectOut [][]byte) { - actualOut, err := x509certificate.RawFromCommonProtos(in) - require.NoError(t, err) - assert.Equal(t, expectOut, actualOut) - assert.NotPanics(t, func() { assert.Equal(t, expectOut, x509certificate.RequireRawFromCommonProtos(in)) }) - } - - assertFail := func(t *testing.T, in []*common.Certificate, expectErr string) { - actualOut, err := x509certificate.RawFromCommonProtos(in) - spiretest.RequireErrorPrefix(t, err, expectErr) - assert.Empty(t, actualOut) - assert.Panics(t, func() { x509certificate.RequireRawFromCommonProtos(in) }) - } - - assertOK(t, []*common.Certificate{commonLeaf, commonRoot}, [][]byte{leaf.Raw, root.Raw}) - assertFail(t, []*common.Certificate{commonEmpty}, "missing X.509 certificate data") - assertOK(t, nil, nil) -} - -func TestRawToCommonProto(t *testing.T) { - assertOK := func(t *testing.T, in []byte, expectOut *common.Certificate) { - actualOut, err := x509certificate.RawToCommonProto(in) +func TestToPluginFromAPIProto(t *testing.T) { + assertOK := func(t *testing.T, in *apitypes.X509Certificate, expectOut *plugintypes.X509Certificate) { + actualOut, err := x509certificate.ToPluginFromAPIProto(in) require.NoError(t, err) spiretest.AssertProtoEqual(t, expectOut, actualOut) - assert.NotPanics(t, func() { spiretest.AssertProtoEqual(t, expectOut, x509certificate.RequireRawToCommonProto(in)) }) - } - - assertFail := func(t *testing.T, in []byte, expectErr string) { - actualOut, err := 
x509certificate.RawToCommonProto(in) - spiretest.RequireErrorPrefix(t, err, expectErr) - assert.Empty(t, actualOut) - assert.Panics(t, func() { x509certificate.RequireRawToCommonProto(in) }) - } - - assertOK(t, leaf.Raw, commonLeaf) - assertFail(t, empty.Raw, "missing X.509 certificate data") -} - -func TestRawToCommonProtos(t *testing.T) { - assertOK := func(t *testing.T, in [][]byte, expectOut []*common.Certificate) { - actualOut, err := x509certificate.RawToCommonProtos(in) - require.NoError(t, err) - spiretest.AssertProtoListEqual(t, expectOut, actualOut) - assert.NotPanics(t, func() { spiretest.AssertProtoListEqual(t, expectOut, x509certificate.RequireRawToCommonProtos(in)) }) - } - - assertFail := func(t *testing.T, in [][]byte, expectErr string) { - actualOut, err := x509certificate.RawToCommonProtos(in) - spiretest.RequireErrorPrefix(t, err, expectErr) - assert.Empty(t, actualOut) - assert.Panics(t, func() { x509certificate.RequireRawToCommonProtos(in) }) - } - - assertOK(t, [][]byte{leaf.Raw}, []*common.Certificate{commonLeaf}) - assertFail(t, [][]byte{empty.Raw}, "missing X.509 certificate data") - assertOK(t, nil, nil) -} - -func TestRawFromPluginProto(t *testing.T) { - assertOK := func(t *testing.T, in *plugintypes.X509Certificate, expectOut []byte) { - actualOut, err := x509certificate.RawFromPluginProto(in) - require.NoError(t, err) - assert.Equal(t, expectOut, actualOut) - assert.NotPanics(t, func() { assert.Equal(t, expectOut, x509certificate.RequireRawFromPluginProto(in)) }) - } - - assertFail := func(t *testing.T, in *plugintypes.X509Certificate, expectErr string) { - actualOut, err := x509certificate.RawFromPluginProto(in) - spiretest.RequireErrorPrefix(t, err, expectErr) - assert.Empty(t, actualOut) - assert.Panics(t, func() { x509certificate.RequireRawFromPluginProto(in) }) } - assertOK(t, pluginLeaf, leaf.Raw) - assertFail(t, pluginEmpty, "missing X.509 certificate data") - assertFail(t, pluginBad, "failed to parse X.509 certificate data: ") 
-} - -func TestRawFromPluginProtos(t *testing.T) { - assertOK := func(t *testing.T, in []*plugintypes.X509Certificate, expectOut [][]byte) { - actualOut, err := x509certificate.RawFromPluginProtos(in) - require.NoError(t, err) - assert.Equal(t, expectOut, actualOut) - assert.NotPanics(t, func() { assert.Equal(t, expectOut, x509certificate.RequireRawFromPluginProtos(in)) }) - } - - assertFail := func(t *testing.T, in []*plugintypes.X509Certificate, expectErr string) { - actualOut, err := x509certificate.RawFromPluginProtos(in) + assertFail := func(t *testing.T, in *apitypes.X509Certificate, expectErr string) { + actualOut, err := x509certificate.ToPluginFromAPIProto(in) spiretest.RequireErrorPrefix(t, err, expectErr) assert.Empty(t, actualOut) - assert.Panics(t, func() { x509certificate.RequireRawFromPluginProtos(in) }) } - assertOK(t, []*plugintypes.X509Certificate{pluginLeaf, pluginRoot}, [][]byte{leaf.Raw, root.Raw}) - assertFail(t, []*plugintypes.X509Certificate{pluginEmpty}, "missing X.509 certificate data") + assertOK(t, apiLeaf, pluginLeaf) + assertOK(t, apiTaintedLeaf, pluginTaintedLeaf) + assertFail(t, apiEmpty, "missing X.509 certificate data") assertOK(t, nil, nil) } -func TestRawToPluginProto(t *testing.T) { - assertOK := func(t *testing.T, in []byte, expectOut *plugintypes.X509Certificate) { - actualOut, err := x509certificate.RawToPluginProto(in) - require.NoError(t, err) - spiretest.AssertProtoEqual(t, expectOut, actualOut) - assert.NotPanics(t, func() { spiretest.AssertProtoEqual(t, expectOut, x509certificate.RequireRawToPluginProto(in)) }) - } - - assertFail := func(t *testing.T, in []byte, expectErr string) { - actualOut, err := x509certificate.RawToPluginProto(in) - spiretest.RequireErrorPrefix(t, err, expectErr) - assert.Empty(t, actualOut) - assert.Panics(t, func() { x509certificate.RequireRawToPluginProto(in) }) - } - - assertOK(t, leaf.Raw, pluginLeaf) - assertFail(t, empty.Raw, "missing X.509 certificate data") -} - -func 
TestRawToPluginProtos(t *testing.T) { - assertOK := func(t *testing.T, in [][]byte, expectOut []*plugintypes.X509Certificate) { - actualOut, err := x509certificate.RawToPluginProtos(in) +func TestToPluginFromAPIProtos(t *testing.T) { + assertOK := func(t *testing.T, in []*apitypes.X509Certificate, expectOut []*plugintypes.X509Certificate) { + actualOut, err := x509certificate.ToPluginFromAPIProtos(in) require.NoError(t, err) spiretest.AssertProtoListEqual(t, expectOut, actualOut) - assert.NotPanics(t, func() { spiretest.AssertProtoListEqual(t, expectOut, x509certificate.RequireRawToPluginProtos(in)) }) } - assertFail := func(t *testing.T, in [][]byte, expectErr string) { - actualOut, err := x509certificate.RawToPluginProtos(in) + assertFail := func(t *testing.T, in []*apitypes.X509Certificate, expectErr string) { + actualOut, err := x509certificate.ToPluginFromAPIProtos(in) spiretest.RequireErrorPrefix(t, err, expectErr) assert.Empty(t, actualOut) - assert.Panics(t, func() { x509certificate.RequireRawToPluginProtos(in) }) } - assertOK(t, [][]byte{leaf.Raw}, []*plugintypes.X509Certificate{pluginLeaf}) - assertFail(t, [][]byte{empty.Raw}, "missing X.509 certificate data") + assertOK(t, []*apitypes.X509Certificate{apiLeaf, apiTaintedLeaf}, + []*plugintypes.X509Certificate{pluginLeaf, pluginTaintedLeaf}) + assertFail(t, []*apitypes.X509Certificate{apiEmpty}, "missing X.509 certificate data") assertOK(t, nil, nil) } -func TestToPluginFromAPIProto(t *testing.T) { - assertOK := func(t *testing.T, in *apitypes.X509Certificate, expectOut *plugintypes.X509Certificate) { - actualOut, err := x509certificate.ToPluginFromAPIProto(in) +func TestToPluginFromCertificate(t *testing.T) { + assertOK := func(t *testing.T, in *x509.Certificate, expectOut *plugintypes.X509Certificate) { + actualOut, err := x509certificate.ToPluginFromCertificate(in) require.NoError(t, err) spiretest.AssertProtoEqual(t, expectOut, actualOut) } - assertFail := func(t *testing.T, in 
*apitypes.X509Certificate, expectErr string) { - actualOut, err := x509certificate.ToPluginFromAPIProto(in) + assertFail := func(t *testing.T, in *x509.Certificate, expectErr string) { + actualOut, err := x509certificate.ToPluginFromCertificate(in) spiretest.RequireErrorPrefix(t, err, expectErr) assert.Empty(t, actualOut) } - assertOK(t, apiLeaf, pluginLeaf) - assertFail(t, apiEmpty, "missing X.509 certificate data") - assertOK(t, nil, nil) + assertOK(t, leaf, pluginLeaf) + assertFail(t, nil, "missing X.509 certificate") + assertFail(t, empty, "missing X.509 certificate data") } -func TestToPluginFromAPIProtos(t *testing.T) { - assertOK := func(t *testing.T, in []*apitypes.X509Certificate, expectOut []*plugintypes.X509Certificate) { - actualOut, err := x509certificate.ToPluginFromAPIProtos(in) +func TestToPluginFromCertificates(t *testing.T) { + assertOK := func(t *testing.T, in []*x509.Certificate, expectOut []*plugintypes.X509Certificate) { + actualOut, err := x509certificate.ToPluginFromCertificates(in) require.NoError(t, err) spiretest.AssertProtoListEqual(t, expectOut, actualOut) } - assertFail := func(t *testing.T, in []*apitypes.X509Certificate, expectErr string) { - actualOut, err := x509certificate.ToPluginFromAPIProtos(in) + assertFail := func(t *testing.T, in []*x509.Certificate, expectErr string) { + actualOut, err := x509certificate.ToPluginFromCertificates(in) spiretest.RequireErrorPrefix(t, err, expectErr) assert.Empty(t, actualOut) } - assertOK(t, []*apitypes.X509Certificate{apiLeaf}, []*plugintypes.X509Certificate{pluginLeaf}) - assertFail(t, []*apitypes.X509Certificate{apiEmpty}, "missing X.509 certificate data") + assertOK(t, []*x509.Certificate{leaf}, + []*plugintypes.X509Certificate{pluginLeaf}) + assertFail(t, []*x509.Certificate{empty}, "missing X.509 certificate data") assertOK(t, nil, nil) } - -func assertX509CertificatesEqual(t *testing.T, expected, actual []*x509.Certificate) { +func assertX509CertificatesEqual(t *testing.T, expected, 
actual []*x509certificate.X509Authority) { assert.Empty(t, cmp.Diff(expected, actual)) } -func assertX509CertificateEqual(t *testing.T, expected, actual *x509.Certificate) { +func assertX509CertificateEqual(t *testing.T, expected, actual *x509certificate.X509Authority) { assert.Empty(t, cmp.Diff(expected, actual)) } diff --git a/pkg/common/plugin/httpchallenge/httpchallenge.go b/pkg/common/plugin/httpchallenge/httpchallenge.go new file mode 100644 index 00000000000..2f3e7e36f8c --- /dev/null +++ b/pkg/common/plugin/httpchallenge/httpchallenge.go @@ -0,0 +1,115 @@ +package httpchallenge + +import ( + "context" + "crypto/rand" + "encoding/base64" + "fmt" + "io" + "net" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/spiffe/spire/pkg/common/idutil" +) + +const ( + nonceLen = 32 + + // PluginName for http based attestor + PluginName = "http_challenge" +) + +type AttestationData struct { + HostName string `json:"hostname"` + AgentName string `json:"agentname"` + Port int `json:"port"` +} + +type Challenge struct { + Nonce string `json:"nonce"` +} + +type Response struct { +} + +func GenerateChallenge(forceNonce string) (*Challenge, error) { + nonce := forceNonce + if nonce == "" { + var err error + nonce, err = generateNonce() + if err != nil { + return nil, err + } + } + return &Challenge{Nonce: nonce}, nil +} + +func CalculateResponse(_ *Challenge) (*Response, error) { + return &Response{}, nil +} + +func VerifyChallenge(ctx context.Context, client *http.Client, attestationData *AttestationData, challenge *Challenge) error { + if attestationData.HostName == "" { + return fmt.Errorf("hostname must be set") + } + if attestationData.AgentName == "" { + return fmt.Errorf("agentname must be set") + } + if attestationData.Port <= 0 { + return fmt.Errorf("port is invalid") + } + if strings.Contains(attestationData.HostName, "/") { + return fmt.Errorf("hostname can not contain a slash") + } + if 
strings.Contains(attestationData.HostName, ":") { + return fmt.Errorf("hostname can not contain a colon") + } + if strings.Contains(attestationData.AgentName, ".") { + return fmt.Errorf("agentname can not contain a dot") + } + turl := url.URL{ + Scheme: "http", + Host: net.JoinHostPort(attestationData.HostName, strconv.Itoa(attestationData.Port)), + Path: fmt.Sprintf("/.well-known/spiffe/nodeattestor/http_challenge/%s/challenge", attestationData.AgentName), + } + + req, err := http.NewRequestWithContext(ctx, "GET", turl.String(), nil) + if err != nil { + return err + } + + resp, err := client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + body, err := io.ReadAll(io.LimitReader(resp.Body, 64)) + if err != nil { + return err + } + nonce := strings.TrimSpace(string(body)) + if nonce != challenge.Nonce { + return fmt.Errorf("expected nonce %q but got %q", challenge.Nonce, body) + } + return nil +} + +// MakeAgentID creates an agent ID +func MakeAgentID(td spiffeid.TrustDomain, hostName string) (spiffeid.ID, error) { + agentPath := fmt.Sprintf("/http_challenge/%s", hostName) + + return idutil.AgentID(td, agentPath) +} + +func generateNonce() (string, error) { + b := make([]byte, nonceLen) + if _, err := rand.Read(b); err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(b), nil +} diff --git a/pkg/common/plugin/httpchallenge/httpchallenge_test.go b/pkg/common/plugin/httpchallenge/httpchallenge_test.go new file mode 100644 index 00000000000..d81a6a15bfc --- /dev/null +++ b/pkg/common/plugin/httpchallenge/httpchallenge_test.go @@ -0,0 +1,101 @@ +package httpchallenge + +import ( + "context" + "net" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestValidateChallenge(t *testing.T) { + tests := []struct { + desc string + hostName string + agentName string + nonce string + testNonce string + expectErr string + }{ + { + desc: "bad hostName", + hostName: "foo/bar", 
+ agentName: "ok", + nonce: "1234", + testNonce: "1234", + expectErr: "hostname can not contain a slash", + }, + { + desc: "bad hostName", + hostName: "foo:bar", + agentName: "ok", + nonce: "1234", + testNonce: "1234", + expectErr: "hostname can not contain a colon", + }, + { + desc: "bad agentName", + hostName: "foo.bar", + agentName: "not.ok", + nonce: "1234", + testNonce: "1234", + expectErr: "agentname can not contain a dot", + }, + { + desc: "fail nonce", + hostName: "foo.bar", + agentName: "ok", + nonce: "1234", + testNonce: "1235", + expectErr: "expected nonce \"1235\" but got \"1234\"", + }, + { + desc: "success", + hostName: "foo.bar", + agentName: "ok", + nonce: "1234", + testNonce: "1234", + expectErr: "", + }, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + ad := &AttestationData{ + HostName: tt.hostName, + AgentName: tt.agentName, + Port: 80, + } + c := &Challenge{ + Nonce: tt.testNonce, + } + + testServer := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + _, err := res.Write([]byte(tt.nonce)) + require.NoError(t, err) + })) + defer func() { testServer.Close() }() + + transport := &http.Transport{ + DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { + if addr == "foo.bar:80" { + addr = strings.TrimPrefix(testServer.URL, "http://") + } + dialer := &net.Dialer{} + return dialer.DialContext(ctx, network, addr) + }, + } + + err := VerifyChallenge(context.Background(), &http.Client{Transport: transport}, ad, c) + if tt.expectErr != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tt.expectErr) + return + } + require.NoError(t, err) + }) + } +} diff --git a/pkg/common/plugin/k8s/apiserver/client.go b/pkg/common/plugin/k8s/apiserver/client.go index 163f74c6c04..3f1bdeaf7cd 100644 --- a/pkg/common/plugin/k8s/apiserver/client.go +++ b/pkg/common/plugin/k8s/apiserver/client.go @@ -65,10 +65,6 @@ func (c *client) GetPod(ctx context.Context, namespace, 
podName string) (*v1.Pod return nil, fmt.Errorf("unable to query pods API: %w", err) } - if pod == nil { - return nil, fmt.Errorf("got nil pod for pod name: %v", podName) - } - return pod, nil } @@ -90,10 +86,6 @@ func (c *client) GetNode(ctx context.Context, nodeName string) (*v1.Node, error) return nil, fmt.Errorf("unable to query nodes API: %w", err) } - if node == nil { - return nil, fmt.Errorf("got nil node for node name: %v", nodeName) - } - return node, nil } @@ -119,10 +111,6 @@ func (c *client) ValidateToken(ctx context.Context, token string, audiences []st } // Evaluate token review response (review server will populate TokenReview.Status field) - if resp == nil { - return nil, errors.New("token review API response is nil") - } - if resp.Status.Error != "" { return nil, fmt.Errorf("token review API response contains an error: %v", resp.Status.Error) } diff --git a/pkg/common/plugin/k8s/apiserver/client_test.go b/pkg/common/plugin/k8s/apiserver/client_test.go index d0f77b43496..51250a7ee1b 100644 --- a/pkg/common/plugin/k8s/apiserver/client_test.go +++ b/pkg/common/plugin/k8s/apiserver/client_test.go @@ -154,7 +154,7 @@ func (s *ClientSuite) TestGetPodFailsToLoadClient() { } func (s *ClientSuite) TestGetPodFailsIfGetsErrorFromAPIServer() { - fakeClient := fake.NewSimpleClientset() + fakeClient := fake.NewClientset() client := s.createClient(fakeClient) pod, err := client.GetPod(ctx, "NAMESPACE", "PODNAME") @@ -162,8 +162,8 @@ func (s *ClientSuite) TestGetPodFailsIfGetsErrorFromAPIServer() { s.Nil(pod) } -func (s *ClientSuite) TestGetPodFailsIfGetsNilPod() { - fakeClient := fake.NewSimpleClientset() +func (s *ClientSuite) TestGetPodIsEmptyIfGetsNilPod() { + fakeClient := fake.NewClientset() fakeClient.CoreV1().(*fake_corev1.FakeCoreV1).PrependReactor("get", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return true, nil, nil @@ -171,12 +171,12 @@ func (s *ClientSuite) TestGetPodFailsIfGetsNilPod() { client := 
s.createClient(fakeClient) pod, err := client.GetPod(ctx, "NAMESPACE", "PODNAME") - s.AssertErrorContains(err, "got nil pod for pod name: PODNAME") - s.Nil(pod) + s.NoError(err) + s.Require().Empty(pod) } func (s *ClientSuite) TestGetPodSucceeds() { - fakeClient := fake.NewSimpleClientset(createPod("PODNAME", "NAMESPACE")) + fakeClient := fake.NewClientset(createPod("PODNAME", "NAMESPACE")) expectedPod := createPod("PODNAME", "NAMESPACE") client := s.createClient(fakeClient) @@ -200,7 +200,7 @@ func (s *ClientSuite) TestGetNodeFailsToLoadClient() { } func (s *ClientSuite) TestGetNodeFailsIfGetsErrorFromAPIServer() { - fakeClient := fake.NewSimpleClientset() + fakeClient := fake.NewClientset() client := s.createClient(fakeClient) node, err := client.GetNode(ctx, "NODENAME") @@ -208,8 +208,8 @@ func (s *ClientSuite) TestGetNodeFailsIfGetsErrorFromAPIServer() { s.Nil(node) } -func (s *ClientSuite) TestGetNodeFailsIfGetsNilNode() { - fakeClient := fake.NewSimpleClientset() +func (s *ClientSuite) TestGetNodeIsEmptyIfGetsNilNode() { + fakeClient := fake.NewClientset() fakeClient.CoreV1().(*fake_corev1.FakeCoreV1).PrependReactor("get", "nodes", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return true, nil, nil @@ -217,12 +217,12 @@ func (s *ClientSuite) TestGetNodeFailsIfGetsNilNode() { client := s.createClient(fakeClient) node, err := client.GetNode(ctx, "NODENAME") - s.AssertErrorContains(err, "got nil node for node name: NODENAME") - s.Nil(node) + s.Require().NoError(err) + s.Require().Empty(node) } func (s *ClientSuite) TestGetNodeSucceeds() { - fakeClient := fake.NewSimpleClientset(createNode("NODENAME")) + fakeClient := fake.NewClientset(createNode("NODENAME")) expectedNode := createNode("NODENAME") client := s.createClient(fakeClient) @@ -239,7 +239,7 @@ func (s *ClientSuite) TestValidateTokenFailsToLoadClient() { } func (s *ClientSuite) TestValidateTokenFailsIfGetsErrorFromAPIServer() { - fakeClient := fake.NewSimpleClientset() + 
fakeClient := fake.NewClientset() fakeClient.AuthenticationV1().(*fake_authv1.FakeAuthenticationV1).PrependReactor("create", "tokenreviews", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return true, &authv1.TokenReview{}, errors.New("error creating token review") @@ -251,8 +251,8 @@ func (s *ClientSuite) TestValidateTokenFailsIfGetsErrorFromAPIServer() { s.Nil(status) } -func (s *ClientSuite) TestValidateTokenFailsIfGetsNilResponse() { - fakeClient := fake.NewSimpleClientset() +func (s *ClientSuite) TestValidateTokenIsEmptyIfGetsNilResponse() { + fakeClient := fake.NewClientset() fakeClient.AuthenticationV1().(*fake_authv1.FakeAuthenticationV1).PrependReactor("create", "tokenreviews", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return true, nil, nil @@ -260,12 +260,12 @@ func (s *ClientSuite) TestValidateTokenFailsIfGetsNilResponse() { client := s.createClient(fakeClient) status, err := client.ValidateToken(ctx, testToken, []string{"aud1"}) - s.AssertErrorContains(err, "token review API response is nil") - s.Nil(status) + s.Require().NoError(err) + s.Require().Empty(status) } func (s *ClientSuite) TestValidateTokenFailsIfStatusContainsError() { - fakeClient := fake.NewSimpleClientset() + fakeClient := fake.NewClientset() fakeClient.AuthenticationV1().(*fake_authv1.FakeAuthenticationV1).PrependReactor("create", "tokenreviews", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return true, &authv1.TokenReview{Status: authv1.TokenReviewStatus{Error: "an error"}}, nil @@ -278,7 +278,7 @@ func (s *ClientSuite) TestValidateTokenFailsIfStatusContainsError() { } func (s *ClientSuite) TestValidateTokenFailsDueToAudienceUnawareValidator() { - fakeClient := fake.NewSimpleClientset() + fakeClient := fake.NewClientset() fakeClient.AuthenticationV1().(*fake_authv1.FakeAuthenticationV1).PrependReactor("create", "tokenreviews", func(action k8stesting.Action) (handled bool, ret 
runtime.Object, err error) { return true, &authv1.TokenReview{ @@ -296,7 +296,7 @@ func (s *ClientSuite) TestValidateTokenFailsDueToAudienceUnawareValidator() { } func (s *ClientSuite) TestValidateTokenSucceeds() { - fakeClient := fake.NewSimpleClientset() + fakeClient := fake.NewClientset() fakeClient.AuthenticationV1().(*fake_authv1.FakeAuthenticationV1).PrependReactor("create", "tokenreviews", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return true, &authv1.TokenReview{ diff --git a/pkg/common/telemetry/names.go b/pkg/common/telemetry/names.go index e94765de7c2..f88341b364d 100644 --- a/pkg/common/telemetry/names.go +++ b/pkg/common/telemetry/names.go @@ -344,6 +344,9 @@ const ( // IDType tags some type of ID (eg. registration ID, SPIFFE ID...) IDType = "id_type" + // ImageID tags the image identifier in the format "repository@sha256:digest" + ImageID = "image_id" + // IssuedAt tags an issuance timestamp IssuedAt = "issued_at" @@ -439,7 +442,7 @@ const ( Reason = "reason" // Reattestable declares if the agent should reattest when its SVID expires - Reattestable = "rettestable" + Reattestable = "reattestable" // Received tags a received value, as opposed to the one that is expected. Message should clarify // what kind of value was received, and a different field should show the expected value. 
@@ -541,6 +544,9 @@ const ( // with other tags to add clarity Subject = "subject" + // SubjectKeyID tags a certificate subject key ID + SubjectKeyID = "subject_key_id" + // SVIDMapSize is the gauge key for the size of the LRU cache SVID map SVIDMapSize = "lru_cache_svid_map_size" @@ -599,6 +605,9 @@ const ( // with other tags to add clarity Updated = "updated" + // UpstreamAuthorityID tags a signing authority ID + UpstreamAuthorityID = "upstream_authority_id" + // StoreSvid tags if entry is storable StoreSvid = "store_svid" @@ -654,6 +663,24 @@ const ( // Cache functionality related to a cache Cache = "cache" + // AgentsByIDCache functionality related to the agent btree cache indexed by ID + AgentsByIDCache = "agents_by_id_cache" + + // AgentsByExpiresAtCache functionality related to the agent btree cache indexed by ExpiresAt + AgentsByExpiresAtCache = "agents_by_expiresat_cache" + + // NodeAliasesByEntryIDCache functionality related to the node-aliases btree cache indexed by EntryID + NodeAliasesByEntryIDCache = "nodealiases_by_entryid_cache" + + // NodeAliasesBySelectorCache functionality related to the node-aliases btree cache indexed by Selector + NodeAliasesBySelectorCache = "nodealiases_by_selector_cache" + + // EntriesByEntryIDCache functionality related to the entries btree cache indexed by EntryID + EntriesByEntryIDCache = "entries_by_entryid_cache" + + // EntriesByParentIDCache functionality related to the entries btree cache indexed by ParentID + EntriesByParentIDCache = "entries_by_parentid_cache" + // Cache type tag CacheType = "cache_type" @@ -852,8 +879,11 @@ const ( // ListAgents functionality related to listing agents ListAgents = "list_agents" - // CountEntries functionality related to counting all registration entries - CountEntries = "count_entries" + // SkippedEntryEventIDs functionality related to counting missed entry event IDs + SkippedEntryEventIDs = "skipped_entry_event_ids" + + // SkippedNodeEventIDs functionality related to counting 
missed node event IDs + SkippedNodeEventIDs = "skipped_node_event_ids" // ListAllEntriesWithPages functionality related to listing all registration entries with pagination ListAllEntriesWithPages = "list_all_entries_with_pages" diff --git a/pkg/common/telemetry/server/datastore/wrapper.go b/pkg/common/telemetry/server/datastore/wrapper.go index 82ed815f94b..96f84bd0b00 100644 --- a/pkg/common/telemetry/server/datastore/wrapper.go +++ b/pkg/common/telemetry/server/datastore/wrapper.go @@ -2,7 +2,6 @@ package datastore import ( "context" - "crypto" "time" "github.com/spiffe/go-spiffe/v2/spiffeid" @@ -264,16 +263,16 @@ func (w metricsWrapper) SetBundle(ctx context.Context, bundle *common.Bundle) (_ return w.ds.SetBundle(ctx, bundle) } -func (w metricsWrapper) TaintX509CA(ctx context.Context, trustDomainID string, publicKeyToTaint crypto.PublicKey) (err error) { +func (w metricsWrapper) TaintX509CA(ctx context.Context, trustDomainID string, subjectKeyIDToTaint string) (err error) { callCounter := StartTaintX509CAByKeyCall(w.m) defer callCounter.Done(&err) - return w.ds.TaintX509CA(ctx, trustDomainID, publicKeyToTaint) + return w.ds.TaintX509CA(ctx, trustDomainID, subjectKeyIDToTaint) } -func (w metricsWrapper) RevokeX509CA(ctx context.Context, trustDomainID string, publicKeyToRevoke crypto.PublicKey) (err error) { +func (w metricsWrapper) RevokeX509CA(ctx context.Context, trustDomainID string, subjectKeyIDToRevoke string) (err error) { callCounter := StartRevokeX509CACall(w.m) defer callCounter.Done(&err) - return w.ds.RevokeX509CA(ctx, trustDomainID, publicKeyToRevoke) + return w.ds.RevokeX509CA(ctx, trustDomainID, subjectKeyIDToRevoke) } func (w metricsWrapper) TaintJWTKey(ctx context.Context, trustDomainID string, authorityID string) (_ *common.PublicKey, err error) { diff --git a/pkg/common/telemetry/server/datastore/wrapper_test.go b/pkg/common/telemetry/server/datastore/wrapper_test.go index 72e47349f6b..a0b44885a2a 100644 --- 
a/pkg/common/telemetry/server/datastore/wrapper_test.go +++ b/pkg/common/telemetry/server/datastore/wrapper_test.go @@ -2,7 +2,6 @@ package datastore import ( "context" - "crypto" "errors" "reflect" "strings" @@ -490,11 +489,11 @@ func (ds *fakeDataStore) SetBundle(context.Context, *common.Bundle) (*common.Bun return &common.Bundle{}, ds.err } -func (ds *fakeDataStore) TaintX509CA(context.Context, string, crypto.PublicKey) error { +func (ds *fakeDataStore) TaintX509CA(context.Context, string, string) error { return ds.err } -func (ds *fakeDataStore) RevokeX509CA(context.Context, string, crypto.PublicKey) error { +func (ds *fakeDataStore) RevokeX509CA(context.Context, string, string) error { return ds.err } diff --git a/pkg/common/telemetry/server/server.go b/pkg/common/telemetry/server/server.go index 67d1caf83b8..980d4218a28 100644 --- a/pkg/common/telemetry/server/server.go +++ b/pkg/common/telemetry/server/server.go @@ -7,3 +7,51 @@ import "github.com/spiffe/spire/pkg/common/telemetry" func SetEntryDeletedGauge(m telemetry.Metrics, deleted int) { m.SetGauge([]string{telemetry.Entry, telemetry.Deleted}, float32(deleted)) } + +// SetAgentsByIDCacheCountGauge emits a gauge with the number of agents by ID that are +// currently in the node cache. +func SetAgentsByIDCacheCountGauge(m telemetry.Metrics, size int) { + m.SetGauge([]string{telemetry.Node, telemetry.AgentsByIDCache, telemetry.Count}, float32(size)) +} + +// SetAgentsByExpiresAtCacheCountGauge emits a gauge with the number of agents by expiresAt that are +// currently in the node cache. +func SetAgentsByExpiresAtCacheCountGauge(m telemetry.Metrics, size int) { + m.SetGauge([]string{telemetry.Node, telemetry.AgentsByExpiresAtCache, telemetry.Count}, float32(size)) +} + +// SetSkippedNodeEventIDsCacheCountGauge emits a gauge with the number of entries that are +// currently in the skipped-node events cache. 
+func SetSkippedNodeEventIDsCacheCountGauge(m telemetry.Metrics, size int) { + m.SetGauge([]string{telemetry.Node, telemetry.SkippedNodeEventIDs, telemetry.Count}, float32(size)) +} + +// SetNodeAliasesByEntryIDCacheCountGauge emits a gauge with the number of Node Aliases by EntryID that are +// currently in the entry cache. +func SetNodeAliasesByEntryIDCacheCountGauge(m telemetry.Metrics, size int) { + m.SetGauge([]string{telemetry.Entry, telemetry.NodeAliasesByEntryIDCache, telemetry.Count}, float32(size)) +} + +// SetNodeAliasesBySelectorCacheCountGauge emits a gauge with the number of Node Aliases by Selector that are +// currently in the entry cache. +func SetNodeAliasesBySelectorCacheCountGauge(m telemetry.Metrics, size int) { + m.SetGauge([]string{telemetry.Entry, telemetry.NodeAliasesBySelectorCache, telemetry.Count}, float32(size)) +} + +// SetEntriesByEntryIDCacheCountGauge emits a gauge with the number of entries by entryID that are +// currently in the entry cache. +func SetEntriesByEntryIDCacheCountGauge(m telemetry.Metrics, size int) { + m.SetGauge([]string{telemetry.Entry, telemetry.EntriesByEntryIDCache, telemetry.Count}, float32(size)) +} + +// SetEntriesByParentIDCacheCountGauge emits a gauge with the number of entries by parentID that are +// currently in the entry cache. +func SetEntriesByParentIDCacheCountGauge(m telemetry.Metrics, size int) { + m.SetGauge([]string{telemetry.Entry, telemetry.EntriesByParentIDCache, telemetry.Count}, float32(size)) +} + +// SetSkippedEntryEventIDsCacheCountGauge emits a gauge with the number of entries that are +// currently in the skipped-entry events cache. 
+func SetSkippedEntryEventIDsCacheCountGauge(m telemetry.Metrics, size int) { + m.SetGauge([]string{telemetry.Entry, telemetry.SkippedEntryEventIDs, telemetry.Count}, float32(size)) +} diff --git a/pkg/common/util/certs_test.go b/pkg/common/util/certs_test.go index 4c63c43c7f2..05bc74e709f 100644 --- a/pkg/common/util/certs_test.go +++ b/pkg/common/util/certs_test.go @@ -17,5 +17,5 @@ func TestLoadCertPool(t *testing.T) { // be ignored. pool, err := LoadCertPool("testdata/mixed-bundle.pem") require.NoError(err) - require.Len(pool.Subjects(), 2) //nolint:staticcheck // these pools are not system pools so the use of Subjects() is ok for now + require.Len(pool.Subjects(), 2) } diff --git a/pkg/common/util/task.go b/pkg/common/util/task.go index d6b5ac8ff46..27584f17f7d 100644 --- a/pkg/common/util/task.go +++ b/pkg/common/util/task.go @@ -28,7 +28,7 @@ func RunTasks(ctx context.Context, tasks ...func(context.Context) error) error { runTask := func(task func(context.Context) error) (err error) { defer func() { if r := recover(); r != nil { - err = fmt.Errorf("panic: %v\n%s\n", r, string(debug.Stack())) //nolint: revive // newlines are intentional + err = fmt.Errorf("panic: %v\n%s\n", r, string(debug.Stack())) } wg.Done() }() @@ -68,7 +68,7 @@ func SerialRun(tasks ...func(context.Context) error) func(ctx context.Context) e return func(ctx context.Context) (err error) { defer func() { if r := recover(); r != nil { - err = fmt.Errorf("panic: %v\n%s\n", r, string(debug.Stack())) //nolint: revive // newlines are intentional + err = fmt.Errorf("panic: %v\n%s\n", r, string(debug.Stack())) } }() diff --git a/pkg/common/version/version.go b/pkg/common/version/version.go index cfc2d30467d..6cdcdf1809a 100644 --- a/pkg/common/version/version.go +++ b/pkg/common/version/version.go @@ -8,7 +8,7 @@ const ( // IMPORTANT: When updating, make sure to reconcile the versions list that // is part of the upgrade integration test. 
See // test/integration/suites/upgrade/README.md for details. - Base = "1.10.2" + Base = "1.11.0" ) var ( diff --git a/pkg/server/api/bundle.go b/pkg/server/api/bundle.go index 940cbc58ac9..c89e4729f85 100644 --- a/pkg/server/api/bundle.go +++ b/pkg/server/api/bundle.go @@ -37,7 +37,8 @@ func CertificatesToProto(rootCas []*common.Certificate) []*types.X509Certificate var x509Authorities []*types.X509Certificate for _, rootCA := range rootCas { x509Authorities = append(x509Authorities, &types.X509Certificate{ - Asn1: rootCA.DerBytes, + Asn1: rootCA.DerBytes, + Tainted: rootCA.TaintedKey, }) } @@ -50,6 +51,7 @@ func PublicKeysToProto(keys []*common.PublicKey) []*types.JWTKey { PublicKey: key.PkixBytes, KeyId: key.Kid, ExpiresAt: key.NotAfter, + Tainted: key.TaintedKey, }) } return jwtAuthorities diff --git a/pkg/server/api/bundle/v1/service_test.go b/pkg/server/api/bundle/v1/service_test.go index cba82c45e64..86ce239281d 100644 --- a/pkg/server/api/bundle/v1/service_test.go +++ b/pkg/server/api/bundle/v1/service_test.go @@ -2036,7 +2036,7 @@ func TestBatchCreateFederatedBundle(t *testing.T) { }, }, expectedResults: []*bundlev1.BatchCreateFederatedBundleResponse_Result{ - {Status: api.CreateStatus(codes.InvalidArgument, `failed to convert bundle: unable to parse X.509 authority: %v`, expectedX509Err)}, + {Status: api.CreateStatusf(codes.InvalidArgument, `failed to convert bundle: unable to parse X.509 authority: %v`, expectedX509Err)}, }, expectedLogMsgs: []spiretest.LogEntry{ { @@ -2787,7 +2787,7 @@ func TestBatchSetFederatedBundle(t *testing.T) { }, }, expectedResults: []*bundlev1.BatchSetFederatedBundleResponse_Result{ - {Status: api.CreateStatus(codes.InvalidArgument, `failed to convert bundle: unable to parse X.509 authority: %v`, expectedX509Err)}, + {Status: api.CreateStatusf(codes.InvalidArgument, `failed to convert bundle: unable to parse X.509 authority: %v`, expectedX509Err)}, }, expectedLogMsgs: []spiretest.LogEntry{ { diff --git 
a/pkg/server/api/bundle_test.go b/pkg/server/api/bundle_test.go index dad28528223..64007140e9d 100644 --- a/pkg/server/api/bundle_test.go +++ b/pkg/server/api/bundle_test.go @@ -31,13 +31,22 @@ func TestBundleToProto(t *testing.T) { TrustDomainId: td.IDString(), RefreshHint: 10, SequenceNumber: 42, - RootCas: []*common.Certificate{{DerBytes: []byte("cert-bytes")}}, + RootCas: []*common.Certificate{ + {DerBytes: []byte("cert-bytes")}, + {DerBytes: []byte("tainted-cert"), TaintedKey: true}, + }, JwtSigningKeys: []*common.PublicKey{ { Kid: "key-id-1", NotAfter: 1590514224, PkixBytes: []byte("pkix key"), }, + { + Kid: "key-id-2", + NotAfter: 1590514224, + PkixBytes: []byte("pkix key"), + TaintedKey: true, + }, }, }, expectBundle: &types.Bundle{ @@ -48,6 +57,10 @@ func TestBundleToProto(t *testing.T) { { Asn1: []byte("cert-bytes"), }, + { + Asn1: []byte("tainted-cert"), + Tainted: true, + }, }, JwtAuthorities: []*types.JWTKey{ { @@ -56,6 +69,12 @@ func TestBundleToProto(t *testing.T) { KeyId: "key-id-1", ExpiresAt: 1590514224, }, + { + PublicKey: []byte("pkix key"), + KeyId: "key-id-2", + ExpiresAt: 1590514224, + Tainted: true, + }, }, }, }, diff --git a/pkg/server/api/localauthority/v1/service.go b/pkg/server/api/localauthority/v1/service.go index b8e8978d42e..e2d66302c10 100644 --- a/pkg/server/api/localauthority/v1/service.go +++ b/pkg/server/api/localauthority/v1/service.go @@ -2,16 +2,14 @@ package localauthority import ( "context" - "crypto" - "crypto/x509" "errors" "fmt" + "strings" "github.com/sirupsen/logrus" "github.com/spiffe/go-spiffe/v2/spiffeid" localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" "github.com/spiffe/spire/pkg/common/telemetry" - "github.com/spiffe/spire/pkg/common/x509util" "github.com/spiffe/spire/pkg/server/api" "github.com/spiffe/spire/pkg/server/api/rpccontext" "github.com/spiffe/spire/pkg/server/ca/manager" @@ -33,6 +31,9 @@ type CAManager interface { GetNextX509CASlot() manager.Slot 
PrepareX509CA(ctx context.Context) error RotateX509CA(ctx context.Context) + + IsUpstreamAuthority() bool + NotifyTaintedX509Authority(ctx context.Context, authorityID string) error } // RegisterService registers the service on the gRPC server. @@ -335,6 +336,10 @@ func (s *Service) TaintX509Authority(ctx context.Context, req *localauthorityv1. log = log.WithField(telemetry.LocalAuthorityID, req.AuthorityId) } + if s.ca.IsUpstreamAuthority() { + return nil, api.MakeErr(log, codes.FailedPrecondition, "local authority can't be tainted if there is an upstream authority", nil) + } + nextSlot := s.ca.GetNextX509CASlot() switch { @@ -355,7 +360,7 @@ func (s *Service) TaintX509Authority(ctx context.Context, req *localauthorityv1. return nil, api.MakeErr(log, codes.InvalidArgument, "only Old local authorities can be tainted", fmt.Errorf("unsupported local authority status: %v", nextSlot.Status())) } - if err := s.ds.TaintX509CA(ctx, s.td.IDString(), nextSlot.PublicKey()); err != nil { + if err := s.ds.TaintX509CA(ctx, s.td.IDString(), nextSlot.AuthorityID()); err != nil { return nil, api.MakeErr(log, codes.Internal, "failed to taint X.509 authority", err) } @@ -363,6 +368,10 @@ func (s *Service) TaintX509Authority(ctx context.Context, req *localauthorityv1. AuthorityId: nextSlot.AuthorityID(), } + if err := s.ca.NotifyTaintedX509Authority(ctx, nextSlot.AuthorityID()); err != nil { + return nil, api.MakeErr(log, codes.Internal, "failed to notify tainted authority", err) + } + rpccontext.AuditRPC(ctx) log.Info("X.509 authority tainted successfully") @@ -371,25 +380,58 @@ func (s *Service) TaintX509Authority(ctx context.Context, req *localauthorityv1. 
}, nil } +func (s *Service) TaintX509UpstreamAuthority(ctx context.Context, req *localauthorityv1.TaintX509UpstreamAuthorityRequest) (*localauthorityv1.TaintX509UpstreamAuthorityResponse, error) { + rpccontext.AddRPCAuditFields(ctx, buildAuditUpstreamLogFields(req.SubjectKeyId)) + log := rpccontext.Logger(ctx) + + if req.SubjectKeyId != "" { + log = log.WithField(telemetry.SubjectKeyID, req.SubjectKeyId) + } + + if !s.ca.IsUpstreamAuthority() { + return nil, api.MakeErr(log, codes.FailedPrecondition, "upstream authority is not configured", nil) + } + + // TODO: may we request in lower case? + // Normalize SKID + subjectKeyIDRequest := strings.ToLower(req.SubjectKeyId) + if err := s.validateUpstreamAuthoritySubjectKey(subjectKeyIDRequest); err != nil { + return nil, api.MakeErr(log, codes.InvalidArgument, "provided subject key id is not valid", err) + } + + if err := s.ds.TaintX509CA(ctx, s.td.IDString(), subjectKeyIDRequest); err != nil { + return nil, api.MakeErr(log, codes.Internal, "failed to taint upstream authority", err) + } + + rpccontext.AuditRPC(ctx) + log.Info("X.509 upstream authority tainted successfully") + + return &localauthorityv1.TaintX509UpstreamAuthorityResponse{}, nil +} + func (s *Service) RevokeX509Authority(ctx context.Context, req *localauthorityv1.RevokeX509AuthorityRequest) (*localauthorityv1.RevokeX509AuthorityResponse, error) { rpccontext.AddRPCAuditFields(ctx, buildAuditLogFields(req.AuthorityId)) log := rpccontext.Logger(ctx) - authorityID, publicKey, err := s.getX509PublicKey(ctx, req.AuthorityId) - if err != nil { - if req.AuthorityId != "" { - log = log.WithField(telemetry.LocalAuthorityID, req.AuthorityId) - } + if req.AuthorityId != "" { + log = log.WithField(telemetry.LocalAuthorityID, req.AuthorityId) + } + + if s.ca.IsUpstreamAuthority() { + return nil, api.MakeErr(log, codes.FailedPrecondition, "local authority can't be revoked if there is an upstream authority", nil) + } + + if err := 
s.validateLocalAuthorityID(req.AuthorityId); err != nil { return nil, api.MakeErr(log, codes.InvalidArgument, "invalid authority ID", err) } - log = log.WithField(telemetry.LocalAuthorityID, authorityID) - if err := s.ds.RevokeX509CA(ctx, s.td.IDString(), publicKey); err != nil { + log = log.WithField(telemetry.LocalAuthorityID, req.AuthorityId) + if err := s.ds.RevokeX509CA(ctx, s.td.IDString(), req.AuthorityId); err != nil { return nil, api.MakeErr(log, codes.Internal, "failed to revoke X.509 authority", err) } state := &localauthorityv1.AuthorityState{ - AuthorityId: authorityID, + AuthorityId: req.AuthorityId, } rpccontext.AuditRPC(ctx) @@ -400,44 +442,72 @@ func (s *Service) RevokeX509Authority(ctx context.Context, req *localauthorityv1 }, nil } -// getX509PublicKey validates provided authority ID, and return OLD associated public key -func (s *Service) getX509PublicKey(ctx context.Context, authorityID string) (string, crypto.PublicKey, error) { - if authorityID == "" { - return "", nil, errors.New("no authority ID provided") +func (s *Service) RevokeX509UpstreamAuthority(ctx context.Context, req *localauthorityv1.RevokeX509UpstreamAuthorityRequest) (*localauthorityv1.RevokeX509UpstreamAuthorityResponse, error) { + rpccontext.AddRPCAuditFields(ctx, buildAuditUpstreamLogFields(req.SubjectKeyId)) + log := rpccontext.Logger(ctx) + + if req.SubjectKeyId != "" { + log = log.WithField(telemetry.SubjectKeyID, req.SubjectKeyId) } + if !s.ca.IsUpstreamAuthority() { + return nil, api.MakeErr(log, codes.FailedPrecondition, "upstream authority is not configured", nil) + } + + // TODO: may we request in lower case? 
+ // Normalize SKID + subjectKeyIDRequest := strings.ToLower(req.SubjectKeyId) + if err := s.validateUpstreamAuthoritySubjectKey(subjectKeyIDRequest); err != nil { + return nil, api.MakeErr(log, codes.InvalidArgument, "invalid subject key ID", err) + } + + if err := s.ds.RevokeX509CA(ctx, s.td.IDString(), subjectKeyIDRequest); err != nil { + return nil, api.MakeErr(log, codes.Internal, "failed to revoke X.509 upstream authority", err) + } + + rpccontext.AuditRPC(ctx) + log.Info("X.509 upstream authority successfully revoked") + + return &localauthorityv1.RevokeX509UpstreamAuthorityResponse{}, nil +} + +// validateLocalAuthorityID validates provided authority ID, and return OLD associated public key +func (s *Service) validateLocalAuthorityID(authorityID string) error { nextSlot := s.ca.GetNextX509CASlot() - if authorityID == nextSlot.AuthorityID() { - if nextSlot.Status() == journal.Status_PREPARED { - return "", nil, errors.New("unable to use a prepared key") - } + switch { + case authorityID == "": + return errors.New("no authority ID provided") + case authorityID == s.ca.GetCurrentX509CASlot().AuthorityID(): + return errors.New("unable to use current authority") + case authorityID != nextSlot.AuthorityID(): + return errors.New("only Old local authority can be revoked") + case nextSlot.Status() != journal.Status_OLD: + return errors.New("only Old local authority can be revoked") + } + + return nil +} - return nextSlot.AuthorityID(), nextSlot.PublicKey(), nil +func (s *Service) validateUpstreamAuthoritySubjectKey(subjectKeyIDRequest string) error { + if subjectKeyIDRequest == "" { + return errors.New("no subject key ID provided") } currentSlot := s.ca.GetCurrentX509CASlot() - if currentSlot.AuthorityID() == authorityID { - return "", nil, errors.New("unable to use current authority") + if subjectKeyIDRequest == currentSlot.UpstreamAuthorityID() { + return errors.New("unable to use upstream authority singing current authority") } - bundle, err := 
s.ds.FetchBundle(ctx, s.td.IDString()) - if err != nil { - return "", nil, err + nextSlot := s.ca.GetNextX509CASlot() + if subjectKeyIDRequest != nextSlot.UpstreamAuthorityID() { + return errors.New("upstream authority didn't sign the old local authority") } - for _, ca := range bundle.RootCas { - cert, err := x509.ParseCertificate(ca.DerBytes) - if err != nil { - return "", nil, err - } - - subjectKeyID := x509util.SubjectKeyIDToString(cert.SubjectKeyId) - if authorityID == subjectKeyID { - return subjectKeyID, cert.PublicKey, nil - } + if nextSlot.Status() == journal.Status_PREPARED { + return errors.New("only upstream authorities signing an old authority can be used") } - return "", nil, errors.New("no ca found with provided authority ID") + return nil } // validateAuthorityID validates provided authority ID @@ -482,6 +552,14 @@ func buildAuditLogFields(authorityID string) logrus.Fields { return fields } +func buildAuditUpstreamLogFields(authorityID string) logrus.Fields { + fields := logrus.Fields{} + if authorityID != "" { + fields[telemetry.SubjectKeyID] = authorityID + } + return fields +} + func stateFromSlot(s manager.Slot) *localauthorityv1.AuthorityState { return &localauthorityv1.AuthorityState{ AuthorityId: s.AuthorityID(), diff --git a/pkg/server/api/localauthority/v1/service_test.go b/pkg/server/api/localauthority/v1/service_test.go index edafbe16578..adf8ec12be2 100644 --- a/pkg/server/api/localauthority/v1/service_test.go +++ b/pkg/server/api/localauthority/v1/service_test.go @@ -24,8 +24,10 @@ import ( "github.com/spiffe/spire/test/fakes/fakedatastore" "github.com/spiffe/spire/test/grpctest" "github.com/spiffe/spire/test/spiretest" + "github.com/spiffe/spire/test/testca" "github.com/spiffe/spire/test/testkey" testutil "github.com/spiffe/spire/test/util" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -473,7 +475,6 @@ func TestTaintJWTAuthority(t *testing.T) { 
nextSlot *fakeSlot keyToTaint string - expectKeyToTaint string expectLogs []spiretest.LogEntry expectCode codes.Code expectMsg string @@ -709,11 +710,10 @@ func TestRevokeJWTAuthority(t *testing.T) { keyToRevoke string noTaintedKeys bool - expectKeyToTaint crypto.PublicKey - expectLogs []spiretest.LogEntry - expectCode codes.Code - expectMsg string - expectResp *localauthorityv1.RevokeJWTAuthorityResponse + expectLogs []spiretest.LogEntry + expectCode codes.Code + expectMsg string + expectResp *localauthorityv1.RevokeJWTAuthorityResponse }{ { name: "revoke authority from parameter", @@ -1299,8 +1299,6 @@ func TestTaintX509Authority(t *testing.T) { nextCA, nextKey, err := testutil.SelfSign(template) require.NoError(t, err) - nextPublicKeyRaw, err := x509.MarshalPKIXPublicKey(nextKey.Public()) - require.NoError(t, err) nextKeySKI, err := x509util.GetSubjectKeyID(nextKey.Public()) require.NoError(t, err) nextAuthorityID := x509util.SubjectKeyIDToString(nextKeySKI) @@ -1308,18 +1306,31 @@ func TestTaintX509Authority(t *testing.T) { oldCA, _, err := testutil.SelfSign(template) require.NoError(t, err) - for _, tt := range []struct { - name string - currentSlot *fakeSlot - nextSlot *fakeSlot - keyToTaint string + defaultRootCAs := []*common.Certificate{ + { + DerBytes: currentCA.Raw, + }, + { + DerBytes: nextCA.Raw, + }, + { + DerBytes: oldCA.Raw, + }, + } - expectKeyToTaint crypto.PublicKey - expectLogs []spiretest.LogEntry - expectCode codes.Code - expectMsg string - expectResp *localauthorityv1.TaintX509AuthorityResponse - taintedKey []byte + for _, tt := range []struct { + name string + currentSlot *fakeSlot + nextSlot *fakeSlot + keyToTaint string + customRootCAs []*common.Certificate + isUpstreamAuthority bool + notifyTaintedErr error + + expectLogs []spiretest.LogEntry + expectCode codes.Code + expectMsg string + expectResp *localauthorityv1.TaintX509AuthorityResponse }{ { name: "taint old authority", @@ -1463,9 +1474,20 @@ func TestTaintX509Authority(t *testing.T) 
{ currentSlot: createSlot(journal.Status_ACTIVE, currentAuthorityID, currentKey.Public(), notAfterCurrent), nextSlot: createSlot(journal.Status_OLD, nextAuthorityID, nextKey.Public(), notAfterNext), keyToTaint: nextAuthorityID, - taintedKey: nextPublicKeyRaw, - expectCode: codes.Internal, - expectMsg: "failed to taint X.509 authority: root CA is already tainted", + customRootCAs: []*common.Certificate{ + { + DerBytes: currentCA.Raw, + }, + { + DerBytes: nextCA.Raw, + TaintedKey: true, + }, + { + DerBytes: oldCA.Raw, + }, + }, + expectCode: codes.Internal, + expectMsg: "failed to taint X.509 authority: root CA is already tainted", expectLogs: []spiretest.LogEntry{ { Level: logrus.ErrorLevel, @@ -1488,37 +1510,368 @@ func TestTaintX509Authority(t *testing.T) { }, }, }, + { + name: "fail on upstream authority", + currentSlot: createSlot(journal.Status_ACTIVE, currentAuthorityID, currentKey.Public(), notAfterCurrent), + nextSlot: createSlot(journal.Status_OLD, nextAuthorityID, nextKey.Public(), notAfterNext), + keyToTaint: nextAuthorityID, + isUpstreamAuthority: true, + expectCode: codes.FailedPrecondition, + expectMsg: "local authority can't be tainted if there is an upstream authority", + expectLogs: []spiretest.LogEntry{ + { + Level: logrus.ErrorLevel, + Message: "Local authority can't be tainted if there is an upstream authority", + Data: logrus.Fields{ + telemetry.LocalAuthorityID: nextAuthorityID, + }, + }, + { + Level: logrus.InfoLevel, + Message: "API accessed", + Data: logrus.Fields{ + telemetry.Status: "error", + telemetry.StatusCode: "FailedPrecondition", + telemetry.StatusMessage: "local authority can't be tainted if there is an upstream authority", + telemetry.Type: "audit", + telemetry.LocalAuthorityID: nextAuthorityID, + }, + }, + }, + }, + { + name: "fail to notify tainted authority", + currentSlot: createSlot(journal.Status_ACTIVE, currentAuthorityID, currentKey.Public(), notAfterCurrent), + nextSlot: createSlot(journal.Status_OLD, nextAuthorityID, 
nextKey.Public(), notAfterNext), + keyToTaint: nextAuthorityID, + notifyTaintedErr: errors.New("oh no"), + expectCode: codes.Internal, + expectMsg: "failed to notify tainted authority: oh no", + expectLogs: []spiretest.LogEntry{ + { + Level: logrus.ErrorLevel, + Message: "Failed to notify tainted authority", + Data: logrus.Fields{ + telemetry.LocalAuthorityID: nextAuthorityID, + logrus.ErrorKey: "oh no", + }, + }, + { + Level: logrus.InfoLevel, + Message: "API accessed", + Data: logrus.Fields{ + telemetry.Status: "error", + telemetry.StatusCode: "Internal", + telemetry.StatusMessage: "failed to notify tainted authority: oh no", + telemetry.Type: "audit", + telemetry.LocalAuthorityID: nextAuthorityID, + }, + }, + }, + }, } { t.Run(tt.name, func(t *testing.T) { test := setupServiceTest(t) defer test.Cleanup() - var taintedKeys []*common.X509TaintedKey - if tt.taintedKey != nil { - taintedKeys = append(taintedKeys, &common.X509TaintedKey{PublicKey: tt.taintedKey}) - } - test.ca.currentX509CASlot = tt.currentSlot test.ca.nextX509CASlot = tt.nextSlot + test.ca.isUpstreamAuthority = tt.isUpstreamAuthority + test.ca.notifyTaintedExpectErr = tt.notifyTaintedErr + + rootCAs := defaultRootCAs + if tt.customRootCAs != nil { + rootCAs = tt.customRootCAs + } + _, err := test.ds.CreateBundle(ctx, &common.Bundle{ TrustDomainId: serverTrustDomain.IDString(), - RootCas: []*common.Certificate{ - { - DerBytes: currentCA.Raw, + RootCas: rootCAs, + }) + require.NoError(t, err) + + resp, err := test.client.TaintX509Authority(ctx, &localauthorityv1.TaintX509AuthorityRequest{ + AuthorityId: tt.keyToTaint, + }) + + spiretest.AssertGRPCStatusHasPrefix(t, err, tt.expectCode, tt.expectMsg) + spiretest.AssertProtoEqual(t, tt.expectResp, resp) + spiretest.AssertLogs(t, test.logHook.AllEntries(), tt.expectLogs) + // Validate notification is received on success test cases + if tt.expectMsg == "" { + assert.Equal(t, tt.keyToTaint, test.ca.notifyTaintedAuthorityID) + } + }) + } +} + +func 
TestTaintX509UpstreamAuthority(t *testing.T) { + getUpstreamCertAndSubjectID := func(ca *testca.CA) (*x509.Certificate, string) { + // Self signed CA will return itself + cert := ca.X509Authorities()[0] + return cert, x509util.SubjectKeyIDToString(cert.SubjectKeyId) + } + + // Create active upstream authority + activeUpstreamAuthority := testca.New(t, serverTrustDomain) + activeUpstreamAuthorityCert, activeUpstreamAuthorityID := getUpstreamCertAndSubjectID(activeUpstreamAuthority) + + // Create newUpstreamAuthority childs + currentIntermediateCA := activeUpstreamAuthority.ChildCA(testca.WithID(serverTrustDomain.ID())) + nextIntermediateCA := activeUpstreamAuthority.ChildCA(testca.WithID(serverTrustDomain.ID())) + + // Create old upstream authority + deactivatedUpstreamAuthority := testca.New(t, serverTrustDomain) + deactivatedUpstreamAuthorityCert, deactivatedUpstreamAuthorityID := getUpstreamCertAndSubjectID(deactivatedUpstreamAuthority) + + // Create intermediate using old upstream authority + oldIntermediateCA := deactivatedUpstreamAuthority.ChildCA(testca.WithID(serverTrustDomain.ID())) + + defaultRootCAs := []*common.Certificate{ + { + DerBytes: activeUpstreamAuthorityCert.Raw, + }, + { + DerBytes: deactivatedUpstreamAuthorityCert.Raw, + }, + } + + for _, tt := range []struct { + name string + currentSlot *fakeSlot + nextSlot *fakeSlot + subjectKeyIDToTaint string + customRootCAs []*common.Certificate + isLocalAuthority bool + + expectLogs []spiretest.LogEntry + expectCode codes.Code + expectMsg string + expectResp *localauthorityv1.TaintX509UpstreamAuthorityResponse + }{ + { + name: "taint old upstream authority", + currentSlot: createSlotWithUpstream(journal.Status_ACTIVE, currentIntermediateCA, notAfterCurrent), + nextSlot: createSlotWithUpstream(journal.Status_OLD, oldIntermediateCA, notAfterNext), + subjectKeyIDToTaint: deactivatedUpstreamAuthorityID, + expectResp: &localauthorityv1.TaintX509UpstreamAuthorityResponse{}, + expectLogs: []spiretest.LogEntry{ 
+ { + Level: logrus.InfoLevel, + Message: "API accessed", + Data: logrus.Fields{ + telemetry.Status: "success", + telemetry.Type: "audit", + telemetry.SubjectKeyID: deactivatedUpstreamAuthorityID, }, - { - DerBytes: nextCA.Raw, + }, + { + Level: logrus.InfoLevel, + Message: "X.509 upstream authority tainted successfully", + Data: logrus.Fields{ + telemetry.SubjectKeyID: deactivatedUpstreamAuthorityID, }, - { - DerBytes: oldCA.Raw, + }, + }, + }, + { + name: "unable to taint with upstream disabled", + currentSlot: createSlotWithUpstream(journal.Status_ACTIVE, currentIntermediateCA, notAfterCurrent), + nextSlot: createSlotWithUpstream(journal.Status_OLD, oldIntermediateCA, notAfterNext), + subjectKeyIDToTaint: deactivatedUpstreamAuthorityID, + expectCode: codes.FailedPrecondition, + expectMsg: "upstream authority is not configured", + isLocalAuthority: true, + expectLogs: []spiretest.LogEntry{ + { + Level: logrus.ErrorLevel, + Message: "Upstream authority is not configured", + Data: logrus.Fields{ + telemetry.SubjectKeyID: deactivatedUpstreamAuthorityID, + }, + }, + { + Level: logrus.InfoLevel, + Message: "API accessed", + Data: logrus.Fields{ + telemetry.Status: "error", + telemetry.StatusCode: "FailedPrecondition", + telemetry.StatusMessage: "upstream authority is not configured", + telemetry.Type: "audit", + telemetry.SubjectKeyID: deactivatedUpstreamAuthorityID, + }, + }, + }, + }, + { + name: "no subjectID provided", + currentSlot: createSlotWithUpstream(journal.Status_ACTIVE, currentIntermediateCA, notAfterCurrent), + nextSlot: createSlotWithUpstream(journal.Status_OLD, oldIntermediateCA, notAfterNext), + expectCode: codes.InvalidArgument, + expectMsg: "provided subject key id is not valid: no subject key ID provided", + expectLogs: []spiretest.LogEntry{ + { + Level: logrus.ErrorLevel, + Message: "Invalid argument: provided subject key id is not valid", + Data: logrus.Fields{ + logrus.ErrorKey: "no subject key ID provided", + }, + }, + { + Level: 
logrus.InfoLevel, + Message: "API accessed", + Data: logrus.Fields{ + telemetry.Status: "error", + telemetry.StatusCode: "InvalidArgument", + telemetry.StatusMessage: "provided subject key id is not valid: no subject key ID provided", + telemetry.Type: "audit", + }, + }, + }, + }, + { + name: "unable to use active upstream authority", + currentSlot: createSlotWithUpstream(journal.Status_ACTIVE, currentIntermediateCA, notAfterCurrent), + nextSlot: createSlotWithUpstream(journal.Status_OLD, nextIntermediateCA, notAfterNext), + subjectKeyIDToTaint: activeUpstreamAuthorityID, + expectCode: codes.InvalidArgument, + expectMsg: "provided subject key id is not valid: unable to use upstream authority singing current authority", + expectLogs: []spiretest.LogEntry{ + { + Level: logrus.ErrorLevel, + Message: "Invalid argument: provided subject key id is not valid", + Data: logrus.Fields{ + logrus.ErrorKey: "unable to use upstream authority singing current authority", + telemetry.SubjectKeyID: activeUpstreamAuthorityID, + }, + }, + { + Level: logrus.InfoLevel, + Message: "API accessed", + Data: logrus.Fields{ + telemetry.Status: "error", + telemetry.StatusCode: "InvalidArgument", + telemetry.StatusMessage: "provided subject key id is not valid: unable to use upstream authority singing current authority", + telemetry.Type: "audit", + telemetry.SubjectKeyID: activeUpstreamAuthorityID, }, }, - X509TaintedKeys: taintedKeys, + }, + }, + { + name: "unknown subjectKeyID", + currentSlot: createSlotWithUpstream(journal.Status_ACTIVE, currentIntermediateCA, notAfterCurrent), + nextSlot: createSlotWithUpstream(journal.Status_OLD, nextIntermediateCA, notAfterNext), + subjectKeyIDToTaint: "invalidID", + expectCode: codes.InvalidArgument, + expectMsg: "provided subject key id is not valid: upstream authority didn't sign the old local authority", + expectLogs: []spiretest.LogEntry{ + { + Level: logrus.ErrorLevel, + Message: "Invalid argument: provided subject key id is not valid", + Data: 
logrus.Fields{ + logrus.ErrorKey: "upstream authority didn't sign the old local authority", + telemetry.SubjectKeyID: "invalidID", + }, + }, + { + Level: logrus.InfoLevel, + Message: "API accessed", + Data: logrus.Fields{ + telemetry.Status: "error", + telemetry.StatusCode: "InvalidArgument", + telemetry.StatusMessage: "provided subject key id is not valid: upstream authority didn't sign the old local authority", + telemetry.Type: "audit", + telemetry.SubjectKeyID: "invalidID", + }, + }, + }, + }, + { + name: "prepared authority signed by upstream authority", + currentSlot: createSlotWithUpstream(journal.Status_ACTIVE, currentIntermediateCA, notAfterCurrent), + nextSlot: createSlotWithUpstream(journal.Status_PREPARED, oldIntermediateCA, notAfterNext), + subjectKeyIDToTaint: deactivatedUpstreamAuthorityID, + expectCode: codes.InvalidArgument, + expectMsg: "provided subject key id is not valid: only upstream authorities signing an old authority can be used", + expectLogs: []spiretest.LogEntry{ + { + Level: logrus.ErrorLevel, + Message: "Invalid argument: provided subject key id is not valid", + Data: logrus.Fields{ + logrus.ErrorKey: "only upstream authorities signing an old authority can be used", + telemetry.SubjectKeyID: deactivatedUpstreamAuthorityID, + }, + }, + { + Level: logrus.InfoLevel, + Message: "API accessed", + Data: logrus.Fields{ + telemetry.Status: "error", + telemetry.StatusCode: "InvalidArgument", + telemetry.StatusMessage: "provided subject key id is not valid: only upstream authorities signing an old authority can be used", + telemetry.Type: "audit", + telemetry.SubjectKeyID: deactivatedUpstreamAuthorityID, + }, + }, + }, + }, + { + name: "ds failed to taint", + currentSlot: createSlotWithUpstream(journal.Status_ACTIVE, currentIntermediateCA, notAfterCurrent), + nextSlot: createSlotWithUpstream(journal.Status_OLD, oldIntermediateCA, notAfterNext), + subjectKeyIDToTaint: deactivatedUpstreamAuthorityID, + expectCode: codes.Internal, + expectMsg: 
"failed to taint upstream authority: no ca found with provided subject key ID", + customRootCAs: []*common.Certificate{ + { + DerBytes: activeUpstreamAuthorityCert.Raw, + }, + }, + expectLogs: []spiretest.LogEntry{ + { + Level: logrus.ErrorLevel, + Message: "Failed to taint upstream authority", + Data: logrus.Fields{ + logrus.ErrorKey: "rpc error: code = NotFound desc = no ca found with provided subject key ID", + telemetry.SubjectKeyID: deactivatedUpstreamAuthorityID, + }, + }, + { + Level: logrus.InfoLevel, + Message: "API accessed", + Data: logrus.Fields{ + telemetry.Status: "error", + telemetry.StatusCode: "Internal", + telemetry.StatusMessage: "failed to taint upstream authority: no ca found with provided subject key ID", + telemetry.Type: "audit", + telemetry.SubjectKeyID: deactivatedUpstreamAuthorityID, + }, + }, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + test := setupServiceTest(t) + defer test.Cleanup() + + test.ca.currentX509CASlot = tt.currentSlot + test.ca.nextX509CASlot = tt.nextSlot + test.ca.isUpstreamAuthority = !tt.isLocalAuthority + + rootCAs := defaultRootCAs + if tt.customRootCAs != nil { + rootCAs = tt.customRootCAs + } + + _, err := test.ds.CreateBundle(ctx, &common.Bundle{ + TrustDomainId: serverTrustDomain.IDString(), + RootCas: rootCAs, }) require.NoError(t, err) - resp, err := test.client.TaintX509Authority(ctx, &localauthorityv1.TaintX509AuthorityRequest{ - AuthorityId: tt.keyToTaint, + resp, err := test.client.TaintX509UpstreamAuthority(ctx, &localauthorityv1.TaintX509UpstreamAuthorityRequest{ + SubjectKeyId: tt.subjectKeyIDToTaint, }) spiretest.AssertGRPCStatusHasPrefix(t, err, tt.expectCode, tt.expectMsg) @@ -1542,41 +1895,37 @@ func TestRevokeX509Authority(t *testing.T) { nextCA, nextKey, err := testutil.SelfSign(template) require.NoError(t, err) - nextPublicKeyRaw, err := x509.MarshalPKIXPublicKey(nextKey.Public()) - require.NoError(t, err) nextKeySKI, err := x509util.GetSubjectKeyID(nextKey.Public()) require.NoError(t, 
err) nextAuthorityID := x509util.SubjectKeyIDToString(nextKeySKI) - oldCA, oldKey, err := testutil.SelfSign(template) - require.NoError(t, err) - oldPublicKeyRaw, err := x509.MarshalPKIXPublicKey(oldKey.Public()) + _, noStoredKey, err := testutil.SelfSign(template) require.NoError(t, err) - oldKeySKI, err := x509util.GetSubjectKeyID(oldKey.Public()) + noStoredKeySKI, err := x509util.GetSubjectKeyID(noStoredKey.Public()) require.NoError(t, err) - oldAuthorityID := x509util.SubjectKeyIDToString(oldKeySKI) + noStoredAuthorityID := x509util.SubjectKeyIDToString(noStoredKeySKI) for _, tt := range []struct { - name string - currentSlot *fakeSlot - nextSlot *fakeSlot - keyToRevoke string - noTaintedKeys bool - - expectKeyToTaint crypto.PublicKey - expectLogs []spiretest.LogEntry - expectCode codes.Code - expectMsg string - expectResp *localauthorityv1.RevokeX509AuthorityResponse + name string + currentSlot *fakeSlot + nextSlot *fakeSlot + keyToRevoke string + noTaintedKeys bool + isUpstreamAuthority bool + + expectLogs []spiretest.LogEntry + expectCode codes.Code + expectMsg string + expectResp *localauthorityv1.RevokeX509AuthorityResponse }{ { name: "revoke authority from parameter", currentSlot: createSlot(journal.Status_ACTIVE, currentAuthorityID, currentKey.Public(), notAfterCurrent), nextSlot: createSlot(journal.Status_OLD, nextAuthorityID, nextKey.Public(), notAfterNext), - keyToRevoke: oldAuthorityID, + keyToRevoke: nextAuthorityID, expectResp: &localauthorityv1.RevokeX509AuthorityResponse{ RevokedAuthority: &localauthorityv1.AuthorityState{ - AuthorityId: oldAuthorityID, + AuthorityId: nextAuthorityID, }, }, expectLogs: []spiretest.LogEntry{ @@ -1586,14 +1935,14 @@ func TestRevokeX509Authority(t *testing.T) { Data: logrus.Fields{ telemetry.Status: "success", telemetry.Type: "audit", - telemetry.LocalAuthorityID: oldAuthorityID, + telemetry.LocalAuthorityID: nextAuthorityID, }, }, { Level: logrus.InfoLevel, Message: "X.509 authority revoked successfully", Data: 
logrus.Fields{ - telemetry.LocalAuthorityID: oldAuthorityID, + telemetry.LocalAuthorityID: nextAuthorityID, }, }, }, @@ -1630,13 +1979,13 @@ func TestRevokeX509Authority(t *testing.T) { nextSlot: createSlot(journal.Status_PREPARED, nextAuthorityID, nextKey.Public(), notAfterNext), keyToRevoke: nextAuthorityID, expectCode: codes.InvalidArgument, - expectMsg: "invalid authority ID: unable to use a prepared key", + expectMsg: "invalid authority ID: only Old local authority can be revoked", expectLogs: []spiretest.LogEntry{ { Level: logrus.ErrorLevel, Message: "Invalid argument: invalid authority ID", Data: logrus.Fields{ - logrus.ErrorKey: "unable to use a prepared key", + logrus.ErrorKey: "only Old local authority can be revoked", telemetry.LocalAuthorityID: nextAuthorityID, }, }, @@ -1646,7 +1995,7 @@ func TestRevokeX509Authority(t *testing.T) { Data: logrus.Fields{ telemetry.Status: "error", telemetry.StatusCode: "InvalidArgument", - telemetry.StatusMessage: "invalid authority ID: unable to use a prepared key", + telemetry.StatusMessage: "invalid authority ID: only Old local authority can be revoked", telemetry.Type: "audit", telemetry.LocalAuthorityID: nextAuthorityID, }, @@ -1685,17 +2034,17 @@ func TestRevokeX509Authority(t *testing.T) { { name: "ds fails to revoke", currentSlot: createSlot(journal.Status_ACTIVE, currentAuthorityID, currentKey.Public(), notAfterCurrent), - nextSlot: createSlot(journal.Status_OLD, nextAuthorityID, nextKey.Public(), notAfterNext), - keyToRevoke: authorityIDKeyA, - expectCode: codes.InvalidArgument, - expectMsg: "invalid authority ID: no ca found with provided authority ID", + nextSlot: createSlot(journal.Status_OLD, noStoredAuthorityID, noStoredKey.Public(), notAfterNext), + keyToRevoke: noStoredAuthorityID, + expectCode: codes.Internal, + expectMsg: "failed to revoke X.509 authority: no root CA found with provided subject key ID", expectLogs: []spiretest.LogEntry{ { Level: logrus.ErrorLevel, - Message: "Invalid argument: invalid 
authority ID", + Message: "Failed to revoke X.509 authority", Data: logrus.Fields{ - logrus.ErrorKey: "no ca found with provided authority ID", - telemetry.LocalAuthorityID: authorityIDKeyA, + logrus.ErrorKey: "rpc error: code = NotFound desc = no root CA found with provided subject key ID", + telemetry.LocalAuthorityID: noStoredAuthorityID, }, }, { @@ -1703,10 +2052,10 @@ func TestRevokeX509Authority(t *testing.T) { Message: "API accessed", Data: logrus.Fields{ telemetry.Status: "error", - telemetry.StatusCode: "InvalidArgument", - telemetry.StatusMessage: "invalid authority ID: no ca found with provided authority ID", + telemetry.StatusCode: "Internal", + telemetry.StatusMessage: "failed to revoke X.509 authority: no root CA found with provided subject key ID", telemetry.Type: "audit", - telemetry.LocalAuthorityID: authorityIDKeyA, + telemetry.LocalAuthorityID: noStoredAuthorityID, }, }, }, @@ -1741,6 +2090,35 @@ func TestRevokeX509Authority(t *testing.T) { }, }, }, + { + name: "unable to revoke upstream authority", + currentSlot: createSlot(journal.Status_ACTIVE, currentAuthorityID, currentKey.Public(), notAfterCurrent), + nextSlot: createSlot(journal.Status_OLD, nextAuthorityID, nextKey.Public(), notAfterNext), + keyToRevoke: nextAuthorityID, + isUpstreamAuthority: true, + expectCode: codes.FailedPrecondition, + expectMsg: "local authority can't be revoked if there is an upstream authority", + expectLogs: []spiretest.LogEntry{ + { + Level: logrus.ErrorLevel, + Message: "Local authority can't be revoked if there is an upstream authority", + Data: logrus.Fields{ + telemetry.LocalAuthorityID: nextAuthorityID, + }, + }, + { + Level: logrus.InfoLevel, + Message: "API accessed", + Data: logrus.Fields{ + telemetry.Status: "error", + telemetry.StatusCode: "FailedPrecondition", + telemetry.StatusMessage: "local authority can't be revoked if there is an upstream authority", + telemetry.Type: "audit", + telemetry.LocalAuthorityID: nextAuthorityID, + }, + }, + }, + }, } { 
t.Run(tt.name, func(t *testing.T) { test := setupServiceTest(t) @@ -1748,14 +2126,7 @@ func TestRevokeX509Authority(t *testing.T) { test.ca.currentX509CASlot = tt.currentSlot test.ca.nextX509CASlot = tt.nextSlot - - var taintedKeys []*common.X509TaintedKey - if !tt.noTaintedKeys { - taintedKeys = []*common.X509TaintedKey{ - {PublicKey: nextPublicKeyRaw}, - {PublicKey: oldPublicKeyRaw}, - } - } + test.ca.isUpstreamAuthority = tt.isUpstreamAuthority _, err := test.ds.CreateBundle(ctx, &common.Bundle{ TrustDomainId: serverTrustDomain.IDString(), @@ -1764,13 +2135,10 @@ func TestRevokeX509Authority(t *testing.T) { DerBytes: currentCA.Raw, }, { - DerBytes: nextCA.Raw, - }, - { - DerBytes: oldCA.Raw, + DerBytes: nextCA.Raw, + TaintedKey: !tt.noTaintedKeys, }, }, - X509TaintedKeys: taintedKeys, }) require.NoError(t, err) @@ -1785,6 +2153,272 @@ func TestRevokeX509Authority(t *testing.T) { } } +func TestRevokeX509UpstreamAuthority(t *testing.T) { + getUpstreamCertAndSubjectID := func(ca *testca.CA) (*x509.Certificate, string) { + // Self signed CA will return itself + cert := ca.X509Authorities()[0] + return cert, x509util.SubjectKeyIDToString(cert.SubjectKeyId) + } + + // Create active upstream authority + activeUpstreamAuthority := testca.New(t, serverTrustDomain) + activeUpstreamAuthorityCert, activeUpstreamAuthorityID := getUpstreamCertAndSubjectID(activeUpstreamAuthority) + + // Create newUpstreamAuthority childs + currentIntermediateCA := activeUpstreamAuthority.ChildCA(testca.WithID(serverTrustDomain.ID())) + nextIntermediateCA := activeUpstreamAuthority.ChildCA(testca.WithID(serverTrustDomain.ID())) + + // Create old upstream authority + deactivatedUpstreamAuthority := testca.New(t, serverTrustDomain) + deactivatedUpstreamAuthorityCert, deactivatedUpstreamAuthorityID := getUpstreamCertAndSubjectID(deactivatedUpstreamAuthority) + + // Create intermediate using old upstream authority + oldIntermediateCA := 
deactivatedUpstreamAuthority.ChildCA(testca.WithID(serverTrustDomain.ID())) + + for _, tt := range []struct { + name string + currentSlot *fakeSlot + nextSlot *fakeSlot + subjectKeyIDToRevoke string + noTaintedKeys bool + isLocalAuthority bool + + expectLogs []spiretest.LogEntry + expectCode codes.Code + expectMsg string + expectResp *localauthorityv1.RevokeX509UpstreamAuthorityResponse + }{ + { + name: "revoke authority", + currentSlot: createSlotWithUpstream(journal.Status_ACTIVE, currentIntermediateCA, notAfterCurrent), + nextSlot: createSlotWithUpstream(journal.Status_OLD, oldIntermediateCA, notAfterNext), + subjectKeyIDToRevoke: deactivatedUpstreamAuthorityID, + expectResp: &localauthorityv1.RevokeX509UpstreamAuthorityResponse{}, + expectLogs: []spiretest.LogEntry{ + { + Level: logrus.InfoLevel, + Message: "API accessed", + Data: logrus.Fields{ + telemetry.Status: "success", + telemetry.Type: "audit", + telemetry.SubjectKeyID: deactivatedUpstreamAuthorityID, + }, + }, + { + Level: logrus.InfoLevel, + Message: "X.509 upstream authority successfully revoked", + Data: logrus.Fields{ + telemetry.SubjectKeyID: deactivatedUpstreamAuthorityID, + }, + }, + }, + }, + { + name: "unable to revoke with upstream disabled", + currentSlot: createSlotWithUpstream(journal.Status_ACTIVE, currentIntermediateCA, notAfterCurrent), + nextSlot: createSlotWithUpstream(journal.Status_OLD, oldIntermediateCA, notAfterNext), + subjectKeyIDToRevoke: deactivatedUpstreamAuthorityID, + expectCode: codes.FailedPrecondition, + expectMsg: "upstream authority is not configured", + isLocalAuthority: true, + expectLogs: []spiretest.LogEntry{ + { + Level: logrus.ErrorLevel, + Message: "Upstream authority is not configured", + Data: logrus.Fields{ + telemetry.SubjectKeyID: deactivatedUpstreamAuthorityID, + }, + }, + { + Level: logrus.InfoLevel, + Message: "API accessed", + Data: logrus.Fields{ + telemetry.Status: "error", + telemetry.StatusCode: "FailedPrecondition", + telemetry.StatusMessage: 
"upstream authority is not configured", + telemetry.Type: "audit", + telemetry.SubjectKeyID: deactivatedUpstreamAuthorityID, + }, + }, + }, + }, + { + name: "no subjectID provided", + currentSlot: createSlotWithUpstream(journal.Status_ACTIVE, currentIntermediateCA, notAfterCurrent), + nextSlot: createSlotWithUpstream(journal.Status_OLD, oldIntermediateCA, notAfterNext), + expectCode: codes.InvalidArgument, + expectMsg: "invalid subject key ID: no subject key ID provided", + expectLogs: []spiretest.LogEntry{ + { + Level: logrus.ErrorLevel, + Message: "Invalid argument: invalid subject key ID", + Data: logrus.Fields{ + logrus.ErrorKey: "no subject key ID provided", + }, + }, + { + Level: logrus.InfoLevel, + Message: "API accessed", + Data: logrus.Fields{ + telemetry.Status: "error", + telemetry.StatusCode: "InvalidArgument", + telemetry.StatusMessage: "invalid subject key ID: no subject key ID provided", + telemetry.Type: "audit", + }, + }, + }, + }, + { + name: "unable to use active upstream authority", + currentSlot: createSlotWithUpstream(journal.Status_ACTIVE, currentIntermediateCA, notAfterCurrent), + nextSlot: createSlotWithUpstream(journal.Status_OLD, nextIntermediateCA, notAfterNext), + subjectKeyIDToRevoke: activeUpstreamAuthorityID, + expectCode: codes.InvalidArgument, + expectMsg: "invalid subject key ID: unable to use upstream authority singing current authority", + expectLogs: []spiretest.LogEntry{ + { + Level: logrus.ErrorLevel, + Message: "Invalid argument: invalid subject key ID", + Data: logrus.Fields{ + logrus.ErrorKey: "unable to use upstream authority singing current authority", + telemetry.SubjectKeyID: activeUpstreamAuthorityID, + }, + }, + { + Level: logrus.InfoLevel, + Message: "API accessed", + Data: logrus.Fields{ + telemetry.Status: "error", + telemetry.StatusCode: "InvalidArgument", + telemetry.StatusMessage: "invalid subject key ID: unable to use upstream authority singing current authority", + telemetry.Type: "audit", + 
telemetry.SubjectKeyID: activeUpstreamAuthorityID, + }, + }, + }, + }, + { + name: "unknown subjectKeyID", + currentSlot: createSlotWithUpstream(journal.Status_ACTIVE, currentIntermediateCA, notAfterCurrent), + nextSlot: createSlotWithUpstream(journal.Status_OLD, nextIntermediateCA, notAfterNext), + subjectKeyIDToRevoke: "invalidID", + expectCode: codes.InvalidArgument, + expectMsg: "invalid subject key ID: upstream authority didn't sign the old local authority", + expectLogs: []spiretest.LogEntry{ + { + Level: logrus.ErrorLevel, + Message: "Invalid argument: invalid subject key ID", + Data: logrus.Fields{ + logrus.ErrorKey: "upstream authority didn't sign the old local authority", + telemetry.SubjectKeyID: "invalidID", + }, + }, + { + Level: logrus.InfoLevel, + Message: "API accessed", + Data: logrus.Fields{ + telemetry.Status: "error", + telemetry.StatusCode: "InvalidArgument", + telemetry.StatusMessage: "invalid subject key ID: upstream authority didn't sign the old local authority", + telemetry.Type: "audit", + telemetry.SubjectKeyID: "invalidID", + }, + }, + }, + }, + { + name: "prepared authority signed by upstream authority", + currentSlot: createSlotWithUpstream(journal.Status_ACTIVE, currentIntermediateCA, notAfterCurrent), + nextSlot: createSlotWithUpstream(journal.Status_PREPARED, oldIntermediateCA, notAfterNext), + subjectKeyIDToRevoke: deactivatedUpstreamAuthorityID, + expectCode: codes.InvalidArgument, + expectMsg: "invalid subject key ID: only upstream authorities signing an old authority can be used", + expectLogs: []spiretest.LogEntry{ + { + Level: logrus.ErrorLevel, + Message: "Invalid argument: invalid subject key ID", + Data: logrus.Fields{ + logrus.ErrorKey: "only upstream authorities signing an old authority can be used", + telemetry.SubjectKeyID: deactivatedUpstreamAuthorityID, + }, + }, + { + Level: logrus.InfoLevel, + Message: "API accessed", + Data: logrus.Fields{ + telemetry.Status: "error", + telemetry.StatusCode: "InvalidArgument", + 
telemetry.StatusMessage: "invalid subject key ID: only upstream authorities signing an old authority can be used", + telemetry.Type: "audit", + telemetry.SubjectKeyID: deactivatedUpstreamAuthorityID, + }, + }, + }, + }, + { + name: "ds failed revoke untainted keys", + currentSlot: createSlotWithUpstream(journal.Status_ACTIVE, currentIntermediateCA, notAfterCurrent), + nextSlot: createSlotWithUpstream(journal.Status_OLD, oldIntermediateCA, notAfterNext), + subjectKeyIDToRevoke: deactivatedUpstreamAuthorityID, + expectCode: codes.Internal, + expectMsg: "failed to revoke X.509 upstream authority: it is not possible to revoke an untainted root CA", + noTaintedKeys: true, + expectLogs: []spiretest.LogEntry{ + { + Level: logrus.ErrorLevel, + Message: "Failed to revoke X.509 upstream authority", + Data: logrus.Fields{ + logrus.ErrorKey: "rpc error: code = InvalidArgument desc = it is not possible to revoke an untainted root CA", + telemetry.SubjectKeyID: deactivatedUpstreamAuthorityID, + }, + }, + { + Level: logrus.InfoLevel, + Message: "API accessed", + Data: logrus.Fields{ + telemetry.Status: "error", + telemetry.StatusCode: "Internal", + telemetry.StatusMessage: "failed to revoke X.509 upstream authority: it is not possible to revoke an untainted root CA", + telemetry.Type: "audit", + telemetry.SubjectKeyID: deactivatedUpstreamAuthorityID, + }, + }, + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + test := setupServiceTest(t) + defer test.Cleanup() + + test.ca.currentX509CASlot = tt.currentSlot + test.ca.nextX509CASlot = tt.nextSlot + test.ca.isUpstreamAuthority = !tt.isLocalAuthority + + _, err := test.ds.CreateBundle(ctx, &common.Bundle{ + TrustDomainId: serverTrustDomain.IDString(), + RootCas: []*common.Certificate{ + { + DerBytes: activeUpstreamAuthorityCert.Raw, + }, + { + DerBytes: deactivatedUpstreamAuthorityCert.Raw, + TaintedKey: !tt.noTaintedKeys, + }, + }, + }) + require.NoError(t, err) + + resp, err := test.client.RevokeX509UpstreamAuthority(ctx, 
&localauthorityv1.RevokeX509UpstreamAuthorityRequest{ + SubjectKeyId: tt.subjectKeyIDToRevoke, + }) + + spiretest.AssertGRPCStatusHasPrefix(t, err, tt.expectCode, tt.expectMsg) + spiretest.AssertProtoEqual(t, tt.expectResp, resp) + spiretest.AssertLogs(t, test.logHook.AllEntries(), tt.expectLogs) + }) + } +} + func setupServiceTest(t *testing.T) *serviceTest { ds := fakedatastore.New(t) m := &fakeCAManager{} @@ -1846,7 +2480,23 @@ type fakeCAManager struct { prepareJWTKeyErr error - prepareX509CAErr error + prepareX509CAErr error + isUpstreamAuthority bool + + notifyTaintedExpectErr error + notifyTaintedAuthorityID string +} + +func (m *fakeCAManager) NotifyTaintedX509Authority(ctx context.Context, authorityID string) error { + if m.notifyTaintedExpectErr != nil { + return m.notifyTaintedExpectErr + } + m.notifyTaintedAuthorityID = authorityID + return nil +} + +func (m *fakeCAManager) IsUpstreamAuthority() bool { + return m.isUpstreamAuthority } func (m *fakeCAManager) GetCurrentJWTKeySlot() manager.Slot { @@ -1884,10 +2534,15 @@ func (m *fakeCAManager) RotateX509CA(context.Context) { type fakeSlot struct { manager.Slot - authorityID string - notAfter time.Time - publicKey crypto.PublicKey - status journal.Status + authorityID string + upstreamAuthorityID string + notAfter time.Time + publicKey crypto.PublicKey + status journal.Status +} + +func (s *fakeSlot) UpstreamAuthorityID() string { + return s.upstreamAuthorityID } func (s *fakeSlot) AuthorityID() string { @@ -1914,3 +2569,12 @@ func createSlot(status journal.Status, authorityID string, publicKey crypto.Publ status: status, } } + +func createSlotWithUpstream(status journal.Status, ca *testca.CA, notAfter time.Time) *fakeSlot { + return &fakeSlot{ + authorityID: ca.GetSubjectKeyID(), + notAfter: notAfter, + status: status, + upstreamAuthorityID: ca.GetUpstreamAuthorityID(), + } +} diff --git a/pkg/server/api/status.go b/pkg/server/api/status.go index d00e879a5a0..6f5ff94ff39 100644 --- 
a/pkg/server/api/status.go +++ b/pkg/server/api/status.go @@ -11,7 +11,15 @@ import ( ) // CreateStatus creates a proto Status -func CreateStatus(code codes.Code, format string, a ...any) *types.Status { +func CreateStatus(code codes.Code, msg string) *types.Status { + return &types.Status{ + Code: int32(code), + Message: msg, + } +} + +// CreateStatus creates a proto Status +func CreateStatusf(code codes.Code, format string, a ...any) *types.Status { return &types.Status{ Code: int32(code), Message: fmt.Sprintf(format, a...), diff --git a/pkg/server/authorizedentries/cache.go b/pkg/server/authorizedentries/cache.go index 98147958162..77e2d7aaf55 100644 --- a/pkg/server/authorizedentries/cache.go +++ b/pkg/server/authorizedentries/cache.go @@ -267,7 +267,7 @@ func (c *Cache) removeEntry(entryID string) { } } -func (c *Cache) stats() cacheStats { +func (c *Cache) Stats() cacheStats { return cacheStats{ AgentsByID: c.agentsByID.Len(), AgentsByExpiresAt: c.agentsByExpiresAt.Len(), diff --git a/pkg/server/authorizedentries/cache_test.go b/pkg/server/authorizedentries/cache_test.go index be4c2ef9295..86315bece56 100644 --- a/pkg/server/authorizedentries/cache_test.go +++ b/pkg/server/authorizedentries/cache_test.go @@ -173,7 +173,7 @@ func TestCacheInternalStats(t *testing.T) { clk := clock.NewMock(t) t.Run("pristine", func(t *testing.T) { cache := NewCache(clk) - require.Zero(t, cache.stats()) + require.Zero(t, cache.Stats()) }) t.Run("entries and aliases", func(t *testing.T) { @@ -189,13 +189,13 @@ func TestCacheInternalStats(t *testing.T) { require.Equal(t, cacheStats{ EntriesByEntryID: 1, EntriesByParentID: 1, - }, cache.stats()) + }, cache.Stats()) cache.UpdateEntry(entry2a) require.Equal(t, cacheStats{ EntriesByEntryID: 2, EntriesByParentID: 2, - }, cache.stats()) + }, cache.Stats()) cache.UpdateEntry(entry2b) require.Equal(t, cacheStats{ @@ -203,20 +203,20 @@ func TestCacheInternalStats(t *testing.T) { EntriesByParentID: 1, AliasesByEntryID: 2, // one for each 
selector AliasesBySelector: 2, // one for each selector - }, cache.stats()) + }, cache.Stats()) cache.RemoveEntry(entry1.Id) require.Equal(t, cacheStats{ AliasesByEntryID: 2, // one for each selector AliasesBySelector: 2, // one for each selector - }, cache.stats()) + }, cache.Stats()) cache.RemoveEntry(entry2b.Id) - require.Zero(t, cache.stats()) + require.Zero(t, cache.Stats()) // Remove again and make sure nothing happens. cache.RemoveEntry(entry2b.Id) - require.Zero(t, cache.stats()) + require.Zero(t, cache.Stats()) }) t.Run("agents", func(t *testing.T) { @@ -225,28 +225,28 @@ func TestCacheInternalStats(t *testing.T) { require.Equal(t, cacheStats{ AgentsByID: 1, AgentsByExpiresAt: 1, - }, cache.stats()) + }, cache.Stats()) cache.UpdateAgent(agent2.String(), now.Add(time.Hour*2), []*types.Selector{sel2}) require.Equal(t, cacheStats{ AgentsByID: 2, AgentsByExpiresAt: 2, - }, cache.stats()) + }, cache.Stats()) cache.UpdateAgent(agent2.String(), now.Add(time.Hour*3), []*types.Selector{sel2}) require.Equal(t, cacheStats{ AgentsByID: 2, AgentsByExpiresAt: 2, - }, cache.stats()) + }, cache.Stats()) cache.RemoveAgent(agent1.String()) require.Equal(t, cacheStats{ AgentsByID: 1, AgentsByExpiresAt: 1, - }, cache.stats()) + }, cache.Stats()) cache.RemoveAgent(agent2.String()) - require.Zero(t, cache.stats()) + require.Zero(t, cache.Stats()) }) } diff --git a/pkg/server/authpolicy/policy_data.json b/pkg/server/authpolicy/policy_data.json index d2363a1653d..baa1a2e4b23 100644 --- a/pkg/server/authpolicy/policy_data.json +++ b/pkg/server/authpolicy/policy_data.json @@ -200,6 +200,66 @@ "full_method": "/spire.api.server.trustdomain.v1.TrustDomain/RefreshBundle", "allow_local": true, "allow_admin": true + }, + { + "full_method": "/spire.api.server.localauthority.v1.LocalAuthority/GetJWTAuthorityState", + "allow_local": true, + "allow_admin": true + }, + { + "full_method": "/spire.api.server.localauthority.v1.LocalAuthority/PrepareJWTAuthority", + "allow_local": true, + 
"allow_admin": true + }, + { + "full_method": "/spire.api.server.localauthority.v1.LocalAuthority/ActivateJWTAuthority", + "allow_local": true, + "allow_admin": true + }, + { + "full_method": "/spire.api.server.localauthority.v1.LocalAuthority/TaintJWTAuthority", + "allow_local": true, + "allow_admin": true + }, + { + "full_method": "/spire.api.server.localauthority.v1.LocalAuthority/RevokeJWTAuthority", + "allow_local": true, + "allow_admin": true + }, + { + "full_method": "/spire.api.server.localauthority.v1.LocalAuthority/GetX509AuthorityState", + "allow_local": true, + "allow_admin": true + }, + { + "full_method": "/spire.api.server.localauthority.v1.LocalAuthority/PrepareX509Authority", + "allow_local": true, + "allow_admin": true + }, + { + "full_method": "/spire.api.server.localauthority.v1.LocalAuthority/ActivateX509Authority", + "allow_local": true, + "allow_admin": true + }, + { + "full_method": "/spire.api.server.localauthority.v1.LocalAuthority/TaintX509Authority", + "allow_local": true, + "allow_admin": true + }, + { + "full_method": "/spire.api.server.localauthority.v1.LocalAuthority/TaintX509UpstreamAuthority", + "allow_local": true, + "allow_admin": true + }, + { + "full_method": "/spire.api.server.localauthority.v1.LocalAuthority/RevokeX509Authority", + "allow_local": true, + "allow_admin": true + }, + { + "full_method": "/spire.api.server.localauthority.v1.LocalAuthority/RevokeX509UpstreamAuthority", + "allow_local": true, + "allow_admin": true } ] } diff --git a/pkg/server/bundle/datastore/wrapper.go b/pkg/server/bundle/datastore/wrapper.go index fc56152de83..7ef9dc7145c 100644 --- a/pkg/server/bundle/datastore/wrapper.go +++ b/pkg/server/bundle/datastore/wrapper.go @@ -2,7 +2,6 @@ package datastore import ( "context" - "crypto" "time" "github.com/spiffe/spire/pkg/server/datastore" @@ -39,8 +38,8 @@ func (w datastoreWrapper) PruneBundle(ctx context.Context, trustDomainID string, return changed, err } -func (w datastoreWrapper) RevokeX509CA(ctx 
context.Context, trustDomainID string, publicKeyToRevoke crypto.PublicKey) error { - err := w.DataStore.RevokeX509CA(ctx, trustDomainID, publicKeyToRevoke) +func (w datastoreWrapper) RevokeX509CA(ctx context.Context, trustDomainID string, subjectKeyIDToRevoke string) error { + err := w.DataStore.RevokeX509CA(ctx, trustDomainID, subjectKeyIDToRevoke) if err == nil { w.bundleUpdated() } diff --git a/pkg/server/bundle/datastore/wrapper_test.go b/pkg/server/bundle/datastore/wrapper_test.go index 93aed471a54..6fe3a42e3dc 100644 --- a/pkg/server/bundle/datastore/wrapper_test.go +++ b/pkg/server/bundle/datastore/wrapper_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/spiffe/spire/pkg/common/x509util" "github.com/spiffe/spire/pkg/server/datastore" "github.com/spiffe/spire/proto/spire/common" "github.com/spiffe/spire/test/fakes/fakedatastore" @@ -52,12 +53,13 @@ func TestWithBundlePublisher(t *testing.T) { { name: "RevokeX509CA", assertCallingCallback: func(ctx context.Context, t *testing.T, ds datastore.DataStore, wt *wrapperTest) { - require.NoError(t, ds.TaintX509CA(ctx, bundle2.TrustDomainId, rootCA.PublicKey)) + subjectKeyID := x509util.SubjectKeyIDToString(rootCA.SubjectKeyId) + require.NoError(t, ds.TaintX509CA(ctx, bundle2.TrustDomainId, subjectKeyID)) // TaintX509CA should not call the callback function require.False(t, wt.callbackCalled) - require.NoError(t, ds.RevokeX509CA(ctx, bundle2.TrustDomainId, rootCA.PublicKey)) + require.NoError(t, ds.RevokeX509CA(ctx, bundle2.TrustDomainId, subjectKeyID)) require.True(t, wt.callbackCalled) }, }, diff --git a/pkg/server/ca/ca.go b/pkg/server/ca/ca.go index 8b52ae1e98f..673ca817d1b 100644 --- a/pkg/server/ca/ca.go +++ b/pkg/server/ca/ca.go @@ -36,6 +36,7 @@ type ServerCA interface { SignAgentX509SVID(ctx context.Context, params AgentX509SVIDParams) ([]*x509.Certificate, error) SignWorkloadX509SVID(ctx context.Context, params WorkloadX509SVIDParams) ([]*x509.Certificate, 
error) SignWorkloadJWTSVID(ctx context.Context, params WorkloadJWTSVIDParams) (string, error) + TaintedAuthorities() <-chan []*x509.Certificate } // DownstreamX509CAParams are parameters relevant to downstream X.509 CA creation @@ -133,10 +134,11 @@ type Config struct { type CA struct { c Config - mu sync.RWMutex - x509CA *X509CA - x509CAChain []*x509.Certificate - jwtKey *JWTKey + mu sync.RWMutex + x509CA *X509CA + x509CAChain []*x509.Certificate + jwtKey *JWTKey + taintedAuthoritiesCh chan []*x509.Certificate } func NewCA(config Config) *CA { @@ -146,6 +148,9 @@ func NewCA(config Config) *CA { ca := &CA{ c: config, + + // Notify caller about any tainted authority + taintedAuthoritiesCh: make(chan []*x509.Certificate, 1), } _ = config.HealthChecker.AddCheck("server.ca", &caHealth{ @@ -188,6 +193,17 @@ func (ca *CA) SetJWTKey(jwtKey *JWTKey) { ca.jwtKey = jwtKey } +func (ca *CA) NotifyTaintedX509Authorities(taintedAuthorities []*x509.Certificate) { + select { + case ca.taintedAuthoritiesCh <- taintedAuthorities: + default: + } +} + +func (ca *CA) TaintedAuthorities() <-chan []*x509.Certificate { + return ca.taintedAuthoritiesCh +} + func (ca *CA) SignDownstreamX509CA(ctx context.Context, params DownstreamX509CAParams) ([]*x509.Certificate, error) { x509CA, caChain, err := ca.getX509CA() if err != nil { diff --git a/pkg/server/ca/ca_test.go b/pkg/server/ca/ca_test.go index 6472f22d3aa..6dcab1a4689 100644 --- a/pkg/server/ca/ca_test.go +++ b/pkg/server/ca/ca_test.go @@ -436,6 +436,23 @@ func (s *CATestSuite) TestNoJWTKeySet() { s.Require().EqualError(err, "JWT key is not available for signing") } +func (s *CATestSuite) TestTaintedAuthoritiesArePropagated() { + authorities := []*x509.Certificate{ + {Raw: []byte("foh")}, + {Raw: []byte("bar")}, + } + s.ca.NotifyTaintedX509Authorities(authorities) + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + select { + case got := <-s.ca.TaintedAuthorities(): + s.Require().Equal(authorities, got) + case 
<-ctx.Done(): + s.Fail("no notification received") + } +} + func (s *CATestSuite) TestSignWorkloadJWTSVIDUsesDefaultTTLIfTTLUnspecified() { token, err := s.ca.SignWorkloadJWTSVID(ctx, s.createJWTSVIDParams(trustDomainExample, 0)) s.Require().NoError(err) diff --git a/pkg/server/ca/manager/journal.go b/pkg/server/ca/manager/journal.go index 14463fba5c4..ae9250caf17 100644 --- a/pkg/server/ca/manager/journal.go +++ b/pkg/server/ca/manager/journal.go @@ -61,13 +61,14 @@ func (j *Journal) AppendX509CA(ctx context.Context, slotID string, issuedAt time backup := j.entries.X509CAs j.entries.X509CAs = append(j.entries.X509CAs, &journal.X509CAEntry{ - SlotId: slotID, - IssuedAt: issuedAt.Unix(), - NotAfter: x509CA.Certificate.NotAfter.Unix(), - Certificate: x509CA.Certificate.Raw, - UpstreamChain: chainDER(x509CA.UpstreamChain), - Status: journal.Status_PREPARED, - AuthorityId: x509util.SubjectKeyIDToString(x509CA.Certificate.SubjectKeyId), + SlotId: slotID, + IssuedAt: issuedAt.Unix(), + NotAfter: x509CA.Certificate.NotAfter.Unix(), + Certificate: x509CA.Certificate.Raw, + UpstreamChain: chainDER(x509CA.UpstreamChain), + Status: journal.Status_PREPARED, + AuthorityId: x509util.SubjectKeyIDToString(x509CA.Certificate.SubjectKeyId), + UpstreamAuthorityId: x509util.SubjectKeyIDToString(x509CA.Certificate.AuthorityKeyId), }) exceeded := len(j.entries.X509CAs) - journalCap diff --git a/pkg/server/ca/manager/manager.go b/pkg/server/ca/manager/manager.go index 9477ce2ea13..e67530ef13d 100644 --- a/pkg/server/ca/manager/manager.go +++ b/pkg/server/ca/manager/manager.go @@ -6,6 +6,7 @@ import ( "crypto" "crypto/rand" "crypto/x509" + "errors" "fmt" "sync" "time" @@ -13,6 +14,8 @@ import ( "github.com/andres-erbsen/clock" "github.com/sirupsen/logrus" "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/spiffe/spire/pkg/common/backoff" + "github.com/spiffe/spire/pkg/common/coretypes/x509certificate" "github.com/spiffe/spire/pkg/common/telemetry" telemetry_server 
"github.com/spiffe/spire/pkg/common/telemetry/server" "github.com/spiffe/spire/pkg/common/x509util" @@ -42,17 +45,35 @@ const ( sevenDays = 7 * 24 * time.Hour activationThresholdCap = sevenDays activationThresholdDivisor = 6 + + taintBackoffInterval = 5 * time.Second + taintBackoffMaxElapsedTime = 1 * time.Minute ) type ManagedCA interface { SetX509CA(*ca.X509CA) SetJWTKey(*ca.JWTKey) + NotifyTaintedX509Authorities([]*x509.Certificate) } type JwtKeyPublisher interface { PublishJWTKey(ctx context.Context, jwtKey *common.PublicKey) ([]*common.PublicKey, error) } +type AuthorityManager interface { + GetCurrentJWTKeySlot() Slot + GetNextJWTKeySlot() Slot + PrepareJWTKey(ctx context.Context) error + RotateJWTKey(ctx context.Context) + GetCurrentX509CASlot() Slot + GetNextX509CASlot() Slot + PrepareX509CA(ctx context.Context) error + RotateX509CA(ctx context.Context) + IsUpstreamAuthority() bool + PublishJWTKey(ctx context.Context, jwtKey *common.PublicKey) ([]*common.PublicKey, error) + NotifyTaintedX509Authority(ctx context.Context, authorityID string) error +} + type Config struct { CredBuilder *credtemplate.Builder CredValidator *credvalidator.Validator @@ -68,11 +89,12 @@ type Config struct { } type Manager struct { - c Config - caTTL time.Duration - bundleUpdatedCh chan struct{} - upstreamClient *ca.UpstreamClient - upstreamPluginName string + c Config + caTTL time.Duration + bundleUpdatedCh chan struct{} + taintedUpstreamAuthoritiesCh chan []*x509.Certificate + upstreamClient *ca.UpstreamClient + upstreamPluginName string currentX509CA *x509CASlot nextX509CA *x509CASlot @@ -86,6 +108,9 @@ type Manager struct { // Used to log a warning only once when the UpstreamAuthority does not support JWT-SVIDs. 
jwtUnimplementedWarnOnce sync.Once + + // Used for testing backoff, must not be set in regular code + triggerBackOffCh chan error } func NewManager(ctx context.Context, c Config) (*Manager, error) { @@ -94,19 +119,22 @@ func NewManager(ctx context.Context, c Config) (*Manager, error) { } m := &Manager{ - c: c, - caTTL: c.CredBuilder.Config().X509CATTL, - bundleUpdatedCh: make(chan struct{}, 1), + c: c, + caTTL: c.CredBuilder.Config().X509CATTL, + bundleUpdatedCh: make(chan struct{}, 1), + taintedUpstreamAuthoritiesCh: make(chan []*x509.Certificate, 1), } if upstreamAuthority, ok := c.Catalog.GetUpstreamAuthority(); ok { m.upstreamClient = ca.NewUpstreamClient(ca.UpstreamClientConfig{ UpstreamAuthority: upstreamAuthority, BundleUpdater: &bundleUpdater{ - log: c.Log, - trustDomainID: c.TrustDomain.IDString(), - ds: c.Catalog.GetDataStore(), - updated: m.bundleUpdated, + log: c.Log, + trustDomainID: c.TrustDomain.IDString(), + ds: c.Catalog.GetDataStore(), + updated: m.bundleUpdated, + upstreamAuthoritiesTainted: m.notifyUpstreamAuthoritiesTainted, + processedTaintedAuthorities: map[string]struct{}{}, }, }) m.upstreamPluginName = upstreamAuthority.Name() @@ -167,6 +195,16 @@ func (m *Manager) Close() { } } +func (m *Manager) NotifyTaintedX509Authority(ctx context.Context, authorityID string) error { + taintedAuthority, err := m.fetchRootCAByAuthorityID(ctx, authorityID) + if err != nil { + return err + } + + m.c.CA.NotifyTaintedX509Authorities([]*x509.Certificate{taintedAuthority}) + return nil +} + func (m *Manager) GetCurrentX509CASlot() Slot { m.x509CAMutex.RLock() defer m.x509CAMutex.RUnlock() @@ -227,6 +265,7 @@ func (m *Manager) PrepareX509CA(ctx context.Context) (err error) { // Set key from new CA, to be able to get it after // slot moved to old state slot.authorityID = x509util.SubjectKeyIDToString(x509CA.Certificate.SubjectKeyId) + slot.upstreamAuthorityID = x509util.SubjectKeyIDToString(x509CA.Certificate.AuthorityKeyId) slot.publicKey =
slot.x509CA.Certificate.PublicKey slot.notAfter = slot.x509CA.Certificate.NotAfter @@ -235,15 +274,20 @@ func (m *Manager) PrepareX509CA(ctx context.Context) (err error) { } m.c.Log.WithFields(logrus.Fields{ - telemetry.Slot: slot.id, - telemetry.IssuedAt: slot.issuedAt, - telemetry.Expiration: slot.x509CA.Certificate.NotAfter, - telemetry.SelfSigned: m.upstreamClient == nil, - telemetry.LocalAuthorityID: slot.authorityID, + telemetry.Slot: slot.id, + telemetry.IssuedAt: slot.issuedAt, + telemetry.Expiration: slot.x509CA.Certificate.NotAfter, + telemetry.SelfSigned: m.upstreamClient == nil, + telemetry.LocalAuthorityID: slot.authorityID, + telemetry.UpstreamAuthorityID: slot.upstreamAuthorityID, }).Info("X509 CA prepared") return nil } +func (m *Manager) IsUpstreamAuthority() bool { + return m.upstreamClient != nil +} + func (m *Manager) ActivateX509CA(ctx context.Context) { m.x509CAMutex.RLock() defer m.x509CAMutex.RUnlock() @@ -441,13 +485,19 @@ func (m *Manager) PruneCAJournals(ctx context.Context) (err error) { return nil } -func (m *Manager) NotifyOnBundleUpdate(ctx context.Context) { +// ProcessBundleUpdates Notify any bundle update, or process tainted authorities +func (m *Manager) ProcessBundleUpdates(ctx context.Context) { for { select { case <-m.bundleUpdatedCh: if err := m.notifyBundleUpdated(ctx); err != nil { m.c.Log.WithError(err).Warn("Failed to notify on bundle update") } + case taintedAuthorities := <-m.taintedUpstreamAuthoritiesCh: + if err := m.notifyTaintedAuthorities(ctx, taintedAuthorities); err != nil { + m.c.Log.WithError(err).Error("Failed to force intermediate bundle rotation") + return + } case <-ctx.Done(): return } @@ -493,10 +543,11 @@ func (m *Manager) activateJWTKey(ctx context.Context) { func (m *Manager) activateX509CA(ctx context.Context) { log := m.c.Log.WithFields(logrus.Fields{ - telemetry.Slot: m.currentX509CA.id, - telemetry.IssuedAt: m.currentX509CA.issuedAt, - telemetry.Expiration: 
m.currentX509CA.x509CA.Certificate.NotAfter, - telemetry.LocalAuthorityID: m.currentX509CA.authorityID, + telemetry.Slot: m.currentX509CA.id, + telemetry.IssuedAt: m.currentX509CA.issuedAt, + telemetry.Expiration: m.currentX509CA.x509CA.Certificate.NotAfter, + telemetry.LocalAuthorityID: m.currentX509CA.authorityID, + telemetry.UpstreamAuthorityID: m.currentX509CA.upstreamAuthorityID, }) log.Info("X509 CA activated") telemetry_server.IncrActivateX509CAManagerCounter(m.c.Metrics) @@ -529,6 +580,114 @@ func (m *Manager) dropBundleUpdated() { default: } } + +func (m *Manager) notifyUpstreamAuthoritiesTainted(taintedAuthorities []*x509.Certificate) { + select { + case m.taintedUpstreamAuthoritiesCh <- taintedAuthorities: + default: + } +} + +func (m *Manager) fetchRootCAByAuthorityID(ctx context.Context, authorityID string) (*x509.Certificate, error) { + bundle, err := m.fetchRequiredBundle(ctx) + if err != nil { + return nil, err + } + + for _, rootCA := range bundle.RootCas { + if rootCA.TaintedKey { + cert, err := x509.ParseCertificate(rootCA.DerBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse RootCA: %w", err) + } + + skID := x509util.SubjectKeyIDToString(cert.SubjectKeyId) + if authorityID == skID { + return cert, nil + } + } + } + + return nil, fmt.Errorf("no tainted root CA found with authority ID: %q", authorityID) +} + +func (m *Manager) notifyTaintedAuthorities(ctx context.Context, taintedAuthorities []*x509.Certificate) error { + taintBackoff := backoff.NewBackoff( + m.c.Clock, + taintBackoffInterval, + backoff.WithMaxElapsedTime(taintBackoffMaxElapsedTime), + ) + + for { + err := m.processTaintedUpstreamAuthorities(ctx, taintedAuthorities) + if err == nil { + break + } + + nextDuration := taintBackoff.NextBackOff() + if nextDuration == backoff.Stop { + return err + } + m.c.Log.WithError(err).Warn("Failed to process tainted keys on upstream authority") + if m.triggerBackOffCh != nil { + m.triggerBackOffCh <- err + } + + select { + case 
<-ctx.Done(): + return ctx.Err() + case <-m.c.Clock.After(nextDuration): + continue + } + } + + return nil +} + +func (m *Manager) processTaintedUpstreamAuthorities(ctx context.Context, taintedAuthorities []*x509.Certificate) error { + // Nothing to rotate if no upstream authority is used + if m.upstreamClient == nil { + return errors.New("processing of tainted upstream authorities must not be reached when not using an upstream authority; please report this bug") + } + + if len(taintedAuthorities) == 0 { + // No tainted keys found + return nil + } + + m.c.Log.Debug("Processing tainted keys on upstream authority") + + currentSlotCA := m.currentX509CA.x509CA + if ok := isX509AuthorityTainted(currentSlotCA, taintedAuthorities); ok { + m.c.Log.Info("Current root CA is signed by a tainted upstream authority, preparing rotation") + if ok := m.shouldPrepareX509CA(taintedAuthorities); ok { + if err := m.PrepareX509CA(ctx); err != nil { + return fmt.Errorf("failed to prepare x509 authority: %w", err) + } + } + + // Activate the prepared X.509 authority + m.RotateX509CA(ctx) + } + + // Now that we have rotated the intermediate, we can notify about the + // tainted authorities, so agents and downstream servers can start forcing + // the rotation of their SVIDs. + ds := m.c.Catalog.GetDataStore() + for _, each := range taintedAuthorities { + skID := x509util.SubjectKeyIDToString(each.SubjectKeyId) + if err := ds.TaintX509CA(ctx, m.c.TrustDomain.IDString(), skID); err != nil { + return fmt.Errorf("could not taint X509 CA in datastore: %w", err) + } + } + + // Intermediate is safe. Notify rotator to force rotation + // of tainted X.509 SVID. 
+ m.c.CA.NotifyTaintedX509Authorities(taintedAuthorities) + + return nil +} + func (m *Manager) notifyBundleUpdated(ctx context.Context) error { var bundle *common.Bundle return m.notify(ctx, "bundle updated", false, @@ -691,6 +850,20 @@ func (m *Manager) appendBundle(ctx context.Context, caChain []*x509.Certificate, return res, nil } +func (m *Manager) shouldPrepareX509CA(taintedAuthorities []*x509.Certificate) bool { + slot := m.nextX509CA + switch { + case slot.IsEmpty(): + return true + case slot.Status() == journal.Status_PREPARED: + isTainted := isX509AuthorityTainted(slot.x509CA, taintedAuthorities) + m.c.Log.Info("Next authority is tainted, prepare new X.509 authority") + return isTainted + default: + return false + } +} + // MaxSVIDTTL returns the maximum SVID lifetime that can be guaranteed to not // be cut artificially short by a scheduled rotation. func MaxSVIDTTL() time.Duration { @@ -719,23 +892,76 @@ func MinCATTLForSVIDTTL(svidTTL time.Duration) time.Duration { } type bundleUpdater struct { - log logrus.FieldLogger - trustDomainID string - ds datastore.DataStore - updated func() + log logrus.FieldLogger + trustDomainID string + ds datastore.DataStore + updated func() + upstreamAuthoritiesTainted func([]*x509.Certificate) + processedTaintedAuthorities map[string]struct{} } -func (u *bundleUpdater) AppendX509Roots(ctx context.Context, roots []*x509.Certificate) error { +func (u *bundleUpdater) SyncX509Roots(ctx context.Context, roots []*x509certificate.X509Authority) error { bundle := &common.Bundle{ TrustDomainId: u.trustDomainID, RootCas: make([]*common.Certificate, 0, len(roots)), } + x509Authorities, err := u.fetchX509Authorities(ctx) + if err != nil { + return err + } + + newAuthorities := make(map[string]struct{}, len(roots)) + var taintedAuthorities []*x509.Certificate for _, root := range roots { + skID := x509util.SubjectKeyIDToString(root.Certificate.SubjectKeyId) + // Collect all skIDs + newAuthorities[skID] = struct{}{} + + // Verify if 
new root ca is tainted + if root.Tainted { + // Taint x.509 authority, if required + if found, ok := x509Authorities[skID]; ok && !found.Tainted { + _, alreadyProcessed := u.processedTaintedAuthorities[skID] + if !alreadyProcessed { + u.processedTaintedAuthorities[skID] = struct{}{} + // Add to the list of new tainted authorities + taintedAuthorities = append(taintedAuthorities, found.Certificate) + u.log.WithField(telemetry.SubjectKeyID, skID).Info("X.509 authority tainted") + } + // Prevent to add tainted keys, since status is updated before + continue + } + } + bundle.RootCas = append(bundle.RootCas, &common.Certificate{ - DerBytes: root.Raw, + DerBytes: root.Certificate.Raw, + TaintedKey: root.Tainted, }) } + + // Notify about tainted authorities to force the rotation of + // intermediates and update the database. This is done in a separate thread + // to prevent agents and downstream servers to start the rotation before the + // current server starts the rotation of the intermediate. 
+ if len(taintedAuthorities) > 0 { + u.upstreamAuthoritiesTainted(taintedAuthorities) + } + + for skID, authority := range x509Authorities { + // Only tainted keys can be revoked + if authority.Tainted { + // In case a stored tainted authority is not found, + // from latest bundle update, then revoke it + if _, found := newAuthorities[skID]; !found { + if err := u.ds.RevokeX509CA(ctx, u.trustDomainID, skID); err != nil { + return fmt.Errorf("failed to revoke a tainted key %q: %w", skID, err) + } + u.log.WithField(telemetry.SubjectKeyID, skID).Info("X.509 authority revoked") + } + } + } + if _, err := u.appendBundle(ctx, bundle); err != nil { return err } @@ -757,6 +983,32 @@ func (u *bundleUpdater) LogError(err error, msg string) { u.log.WithError(err).Error(msg) } +func (u *bundleUpdater) fetchX509Authorities(ctx context.Context) (map[string]*x509certificate.X509Authority, error) { + bundle, err := u.ds.FetchBundle(ctx, u.trustDomainID) + if err != nil { + return nil, fmt.Errorf("failed to fetch bundle: %w", err) + } + // Bundle not found + if bundle == nil { + return nil, nil + } + + authorities := map[string]*x509certificate.X509Authority{} + for _, eachRoot := range bundle.RootCas { + cert, err := x509.ParseCertificate(eachRoot.DerBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse root certificate: %w", err) + } + + authorities[x509util.SubjectKeyIDToString(cert.SubjectKeyId)] = &x509certificate.X509Authority{ + Certificate: cert, + Tainted: eachRoot.TaintedKey, + } + } + + return authorities, nil +} + func (u *bundleUpdater) appendBundle(ctx context.Context, bundle *common.Bundle) (*common.Bundle, error) { dsBundle, err := u.ds.AppendBundle(ctx, bundle) if err != nil { @@ -809,3 +1061,24 @@ func publicKeyFromJWTKey(jwtKey *ca.JWTKey) (*common.PublicKey, error) { NotAfter: jwtKey.NotAfter.Unix(), }, nil } + +// isX509AuthorityTainted verifies if the provided X.509 authority is tainted +func isX509AuthorityTainted(x509CA *ca.X509CA,
taintedAuthorities []*x509.Certificate) bool { + rootPool := x509.NewCertPool() + for _, taintedKey := range taintedAuthorities { + rootPool.AddCert(taintedKey) + } + + intermediatePool := x509.NewCertPool() + for _, intermediateCA := range x509CA.UpstreamChain { + intermediatePool.AddCert(intermediateCA) + } + + // Verify certificate chain, using tainted authority as root + _, err := x509CA.Certificate.Verify(x509.VerifyOptions{ + Intermediates: intermediatePool, + Roots: rootPool, + }) + + return err == nil +} diff --git a/pkg/server/ca/manager/manager_test.go b/pkg/server/ca/manager/manager_test.go index 3049851b704..3050da19fbd 100644 --- a/pkg/server/ca/manager/manager_test.go +++ b/pkg/server/ca/manager/manager_test.go @@ -17,7 +17,9 @@ import ( "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/spiffe/spire/pkg/common/coretypes/x509certificate" telemetry_server "github.com/spiffe/spire/pkg/common/telemetry/server" + "github.com/spiffe/spire/pkg/common/x509util" "github.com/spiffe/spire/pkg/server/ca" "github.com/spiffe/spire/pkg/server/credtemplate" "github.com/spiffe/spire/pkg/server/credvalidator" @@ -35,6 +37,7 @@ import ( "github.com/spiffe/spire/test/fakes/fakeserverkeymanager" "github.com/spiffe/spire/test/fakes/fakeupstreamauthority" "github.com/spiffe/spire/test/spiretest" + "github.com/spiffe/spire/test/testca" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" @@ -56,6 +59,7 @@ func TestGetCurrentJWTKeySlot(t *testing.T) { test := setupTest(t) test.initSelfSignedManager() + require.False(t, test.m.IsUpstreamAuthority()) t.Run("no authority created", func(t *testing.T) { currentSlot := test.m.GetCurrentJWTKeySlot() @@ -126,6 +130,7 @@ func TestGetCurrentX509CASlot(t *testing.T) { slot := currentSlot.(*x509CASlot) require.Nil(t, slot.x509CA) require.Empty(t, slot.authorityID) + require.Empty(t, slot.upstreamAuthorityID) 
require.Empty(t, slot.issuedAt) require.Empty(t, slot.publicKey) require.Empty(t, slot.notAfter) @@ -141,6 +146,7 @@ func TestGetCurrentX509CASlot(t *testing.T) { slot := currentSlot.(*x509CASlot) require.NotNil(t, slot.x509CA) require.NotEmpty(t, slot.authorityID) + require.Empty(t, slot.upstreamAuthorityID) require.NotNil(t, slot.publicKey) require.Equal(t, expectIssuedAt, slot.issuedAt) require.Equal(t, expectNotAfter, slot.notAfter) @@ -159,6 +165,7 @@ func TestGetNextX509CASlot(t *testing.T) { require.Nil(t, slot.x509CA) require.Empty(t, slot.authorityID) + require.Empty(t, slot.upstreamAuthorityID) require.Empty(t, slot.issuedAt) require.Empty(t, slot.publicKey) require.Empty(t, slot.notAfter) @@ -174,6 +181,7 @@ func TestGetNextX509CASlot(t *testing.T) { slot := nextSlot.(*x509CASlot) require.NotNil(t, slot.x509CA) require.NotEmpty(t, slot.authorityID) + require.Empty(t, slot.upstreamAuthorityID) require.NotNil(t, slot.publicKey) require.Equal(t, expectIssuedAt, slot.issuedAt) require.Equal(t, expectNotAfter, slot.notAfter) @@ -240,6 +248,65 @@ func TestSlotLoadedWhenJournalIsLost(t *testing.T) { require.True(t, test.m.GetCurrentX509CASlot().IsEmpty()) } +func TestNotifyTaintedX509Authority(t *testing.T) { + ctx := context.Background() + test := setupTest(t) + test.initSelfSignedManager() + + // Create a test CA + ca := testca.New(t, testTrustDomain) + cert := ca.X509Authorities()[0] + bundle, err := test.ds.CreateBundle(ctx, &common.Bundle{ + TrustDomainId: testTrustDomain.IDString(), + RootCas: []*common.Certificate{ + { + DerBytes: cert.Raw, + TaintedKey: true, + }, + }, + }) + require.NoError(t, err) + + t.Run("notify tainted authority", func(t *testing.T) { + err = test.m.NotifyTaintedX509Authority(ctx, ca.GetSubjectKeyID()) + require.NoError(t, err) + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + + expectedTaintedAuthorities := []*x509.Certificate{cert} + select { + case taintedAuthorities := <-test.ca.taintedAuthoritiesCh: + 
require.Equal(t, expectedTaintedAuthorities, taintedAuthorities) + case <-ctx.Done(): + assert.Fail(t, "no notification received") + } + }) + + // Untaint authority + bundle.RootCas[0].TaintedKey = false + bundle, err = test.ds.UpdateBundle(ctx, bundle, nil) + require.NoError(t, err) + + t.Run("no tainted authority", func(t *testing.T) { + err := test.m.NotifyTaintedX509Authority(ctx, ca.GetSubjectKeyID()) + + expectedErr := fmt.Sprintf("no tainted root CA found with authority ID: %q", ca.GetSubjectKeyID()) + require.EqualError(t, err, expectedErr) + }) + + bundle.RootCas = append(bundle.RootCas, &common.Certificate{ + DerBytes: []byte("foh"), + TaintedKey: true, + }) + _, err = test.ds.UpdateBundle(ctx, bundle, nil) + require.NoError(t, err) + + t.Run("malformed root CA", func(t *testing.T) { + err := test.m.NotifyTaintedX509Authority(ctx, ca.GetSubjectKeyID()) + require.EqualError(t, err, "failed to parse RootCA: x509: malformed certificate") + }) +} + func TestSelfSigning(t *testing.T) { ctx := context.Background() test := setupTest(t) @@ -262,19 +329,20 @@ func TestUpstreamSigned(t *testing.T) { ctx := context.Background() test := setupTest(t) - upstreamAuthority, fakeUA := fakeupstreamauthority.Load(t, fakeupstreamauthority.Config{ + upstreamAuthority, fakeUA := test.newFakeUpstreamAuthority(t, fakeupstreamauthority.Config{ TrustDomain: testTrustDomain, DisallowPublishJWTKey: true, }) test.initAndActivateUpstreamSignedManager(ctx, upstreamAuthority) + require.True(t, test.m.IsUpstreamAuthority()) // X509 CA should be set up to be an intermediate but only have itself // in the chain since it was signed directly by the upstream root. 
x509CA := test.currentX509CA() assert.NotNil(t, x509CA.Signer) if assert.NotNil(t, x509CA.Certificate) { - assert.Equal(t, fakeUA.X509Root().Subject, x509CA.Certificate.Issuer) + assert.Equal(t, fakeUA.X509Root().Certificate.Subject, x509CA.Certificate.Issuer) } if assert.Len(t, x509CA.UpstreamChain, 1) { assert.Equal(t, x509CA.Certificate, x509CA.UpstreamChain[0]) @@ -290,12 +358,198 @@ func TestUpstreamSigned(t *testing.T) { "by this server may have trouble communicating with workloads outside "+ "this cluster when using JWT-SVIDs."), ) + + // Taint first root + err := fakeUA.TaintAuthority(0) + require.NoError(t, err) + + // Get the roots again and verify that the first X.509 authority is tainted + x509Roots := fakeUA.X509Roots() + require.True(t, x509Roots[0].Tainted) + + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + + select { + case taintedAuthorities := <-test.m.taintedUpstreamAuthoritiesCh: + expectedTaintedAuthorities := []*x509.Certificate{x509Roots[0].Certificate} + require.Equal(t, expectedTaintedAuthorities, taintedAuthorities) + case <-ctx.Done(): + assert.Fail(t, "no notification received") + } +} + +func TestUpstreamProcesssTaintedAuthority(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + test := setupTest(t) + + upstreamAuthority, fakeUA := fakeupstreamauthority.Load(t, fakeupstreamauthority.Config{ + TrustDomain: testTrustDomain, + DisallowPublishJWTKey: true, + }) + + test.initAndActivateUpstreamSignedManager(ctx, upstreamAuthority) + require.True(t, test.m.IsUpstreamAuthority()) + + // Prepared must be tainted too + err := test.m.PrepareX509CA(ctx) + require.NoError(t, err) + + go test.m.ProcessBundleUpdates(ctx) + + // Taint first root + err = fakeUA.TaintAuthority(0) + require.NoError(t, err) + + // Get the roots again and verify that the first X.509 authority is tainted + x509Roots := fakeUA.X509Roots() + require.True(t, x509Roots[0].Tainted) + + 
commonCertificates := x509certificate.RequireToCommonProtos(x509Roots) + // Retry until the Tainted attribute is propagated to the database + require.Eventually(t, func() bool { + bundle := test.fetchBundle(ctx) + return spiretest.AssertProtoListEqual(t, commonCertificates, bundle.RootCas) + }, time.Minute, 500*time.Millisecond) + + expectedTaintedAuthorities := []*x509.Certificate{x509Roots[0].Certificate} + select { + case received := <-test.ca.taintedAuthoritiesCh: + require.Equal(t, expectedTaintedAuthorities, received) + case <-ctx.Done(): + assert.Fail(t, "deadline reached") + } +} + +func TestUpstreamProcesssTaintedAuthorityBackoff(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + test := setupTest(t) + + upstreamAuthority, fakeUA := fakeupstreamauthority.Load(t, fakeupstreamauthority.Config{ + TrustDomain: testTrustDomain, + DisallowPublishJWTKey: true, + }) + + test.initAndActivateUpstreamSignedManager(ctx, upstreamAuthority) + require.True(t, test.m.IsUpstreamAuthority()) + + test.m.triggerBackOffCh = make(chan error, 1) + + // Prepared must be tainted too + go test.m.ProcessBundleUpdates(ctx) + + // Set an invalid key type to make prepare fails + test.m.c.X509CAKeyType = 123 + err := test.m.PrepareX509CA(ctx) + require.Error(t, err) + + // Taint first root + err = fakeUA.TaintAuthority(0) + require.NoError(t, err) + + // Get the roots again and verify that the first X.509 authority is tainted + x509Roots := fakeUA.X509Roots() + require.True(t, x509Roots[0].Tainted) + + expectBackoffErr := func(t *testing.T) { + select { + case receivedErr := <-test.m.triggerBackOffCh: + require.EqualError(t, receivedErr, "failed to prepare x509 authority: rpc error: code = Internal desc = keymanager(fake): facade does not support key type \"UNKNOWN(123)\"") + case <-ctx.Done(): + assert.Fail(t, "deadline reached") + } + } + + // Must fail due to the invalid key type + expectBackoffErr(t) + + // Try again; expect 
to fail + test.clock.Add(6 * time.Second) + expectBackoffErr(t) + + // Restore to a valid key type, and advance time again + test.m.c.X509CAKeyType = keymanager.ECP256 + test.clock.Add(10 * time.Second) + + commonCertificates := x509certificate.RequireToCommonProtos(x509Roots) + // Retry until the Tainted attribute is propagated to the database + require.Eventually(t, func() bool { + bundle := test.fetchBundle(ctx) + return spiretest.AssertProtoListEqual(t, commonCertificates, bundle.RootCas) + }, time.Minute, 500*time.Millisecond) + + expectedTaintedAuthorities := []*x509.Certificate{x509Roots[0].Certificate} + select { + case received := <-test.ca.taintedAuthoritiesCh: + require.Equal(t, expectedTaintedAuthorities, received) + case <-ctx.Done(): + assert.Fail(t, "deadline reached") + } +} + +func TestGetCurrentX509CASlotUpstreamSigned(t *testing.T) { + ctx := context.Background() + + test := setupTest(t) + + upstreamAuthority, ua := test.newFakeUpstreamAuthority(t, fakeupstreamauthority.Config{ + TrustDomain: testTrustDomain, + DisallowPublishJWTKey: true, + }) + + test.initAndActivateUpstreamSignedManager(ctx, upstreamAuthority) + + expectIssuedAt := test.clock.Now() + expectNotAfter := expectIssuedAt.Add(test.m.caTTL).UTC() + expectUpstreamAuthorityID := x509util.SubjectKeyIDToString(ua.X509Root().Certificate.SubjectKeyId) + + require.NoError(t, test.m.PrepareX509CA(ctx)) + + currentSlot := test.m.GetCurrentX509CASlot() + slot := currentSlot.(*x509CASlot) + require.NotNil(t, slot.x509CA) + require.NotEmpty(t, slot.authorityID) + require.Equal(t, expectUpstreamAuthorityID, slot.upstreamAuthorityID) + require.NotNil(t, slot.publicKey) + require.Equal(t, expectIssuedAt, slot.issuedAt) + require.Equal(t, expectNotAfter, slot.notAfter) +} + +func TestGetNextX509CASlotUpstreamSigned(t *testing.T) { + ctx := context.Background() + + test := setupTest(t) + upstreamAuthority, ua := test.newFakeUpstreamAuthority(t, fakeupstreamauthority.Config{ + TrustDomain: 
testTrustDomain, + DisallowPublishJWTKey: true, + }) + + test.initAndActivateUpstreamSignedManager(ctx, upstreamAuthority) + + expectIssuedAt := test.clock.Now() + expectNotAfter := expectIssuedAt.Add(test.m.caTTL).UTC() + expectUpstreamAuthorityID := x509util.SubjectKeyIDToString(ua.X509Root().Certificate.SubjectKeyId) + + require.NoError(t, test.m.PrepareX509CA(ctx)) + + nextSlot := test.m.GetNextX509CASlot() + slot := nextSlot.(*x509CASlot) + require.NotNil(t, slot.x509CA) + require.NotEmpty(t, slot.authorityID) + require.Equal(t, expectUpstreamAuthorityID, slot.upstreamAuthorityID) + require.NotNil(t, slot.publicKey) + require.Equal(t, expectIssuedAt, slot.issuedAt) + require.Equal(t, expectNotAfter, slot.notAfter) } func TestUpstreamSignedProducesInvalidChain(t *testing.T) { ctx := context.Background() test := setupTest(t) - upstreamAuthority, _ := fakeupstreamauthority.Load(t, fakeupstreamauthority.Config{ + upstreamAuthority, _ := test.newFakeUpstreamAuthority(t, fakeupstreamauthority.Config{ TrustDomain: testTrustDomain, // The verification code relies on go-spiffe, which for compat reasons, // does not currently validate SPIFFE conformance beyond the leaf @@ -326,7 +580,7 @@ func TestUpstreamSignedProducesInvalidChain(t *testing.T) { func TestUpstreamIntermediateSigned(t *testing.T) { ctx := context.Background() test := setupTest(t) - upstreamAuthority, fakeUA := fakeupstreamauthority.Load(t, fakeupstreamauthority.Config{ + upstreamAuthority, fakeUA := test.newFakeUpstreamAuthority(t, fakeupstreamauthority.Config{ TrustDomain: testTrustDomain, DisallowPublishJWTKey: true, UseIntermediate: true, @@ -363,7 +617,7 @@ func TestUpstreamAuthorityWithPublishJWTKeyImplemented(t *testing.T) { bundle := test.createBundle(ctx) require.Len(t, bundle.JwtSigningKeys, 0) - upstreamAuthority, ua := fakeupstreamauthority.Load(t, fakeupstreamauthority.Config{ + upstreamAuthority, ua := test.newFakeUpstreamAuthority(t, fakeupstreamauthority.Config{ TrustDomain: 
testTrustDomain, }) test.initAndActivateUpstreamSignedManager(ctx, upstreamAuthority) @@ -392,14 +646,14 @@ func TestX509CARotation(t *testing.T) { // kick off a goroutine to service bundle update notifications. This is // typically handled by Run() but using it would complicate the test. test.m.dropBundleUpdated() - go test.m.NotifyOnBundleUpdate(ctx) + go test.m.ProcessBundleUpdates(ctx) // after initialization, we should have a current X509CA but no next. first := test.currentX509CA() require.Equal(t, journal.Status_ACTIVE, test.currentX509CAStatus()) assert.Nil(t, test.nextX509CA(), "second X509CA should not be prepared yet") require.Equal(t, journal.Status_UNKNOWN, test.nextX509CAStatus()) - test.requireBundleRootCAs(ctx, t, first.Certificate) + test.requireIntermediateRootCA(ctx, t, first.Certificate) // Prepare new X509CA. the current X509CA should stay // the same but the next X509CA should have been prepared and added to @@ -411,7 +665,7 @@ func TestX509CARotation(t *testing.T) { second := test.nextX509CA() assert.NotNil(t, second, "second X509CA should have been prepared") require.Equal(t, journal.Status_PREPARED, test.nextX509CAStatus()) - test.requireBundleRootCAs(ctx, t, first.Certificate, second.Certificate) + test.requireIntermediateRootCA(ctx, t, first.Certificate, second.Certificate) // we should now have a bundle update notification due to the preparation test.waitForBundleUpdatedNotification(ctx, notifyCh) @@ -433,7 +687,7 @@ func TestX509CARotation(t *testing.T) { third := test.nextX509CA() assert.NotNil(t, third, "third X509CA should have been prepared") require.Equal(t, journal.Status_PREPARED, test.nextX509CAStatus()) - test.requireBundleRootCAs(ctx, t, first.Certificate, second.Certificate, third.Certificate) + test.requireIntermediateRootCA(ctx, t, first.Certificate, second.Certificate, third.Certificate) // we should now have another bundle update notification due to the preparation test.waitForBundleUpdatedNotification(ctx, notifyCh) @@ 
-482,7 +736,7 @@ func TestJWTKeyRotation(t *testing.T) { // kick off a goroutine to service bundle update notifications. This is // typically handled by Run() but using it would complicate the test. test.m.dropBundleUpdated() // drop bundle update message produce by initialization - go test.m.NotifyOnBundleUpdate(ctx) + go test.m.ProcessBundleUpdates(ctx) // after initialization, we should have a current JWTKey but no next. first := test.currentJWTKey() @@ -562,24 +816,24 @@ func TestPruneBundle(t *testing.T) { firstJWTKey := test.currentJWTKey() secondX509CA := test.nextX509CA() secondJWTKey := test.nextJWTKey() - test.requireBundleRootCAs(ctx, t, firstX509CA.Certificate, secondX509CA.Certificate) + test.requireIntermediateRootCA(ctx, t, firstX509CA.Certificate, secondX509CA.Certificate) test.requireBundleJWTKeys(ctx, t, firstJWTKey, secondJWTKey) // kick off a goroutine to service bundle update notifications. This is // typically handled by Run() but using it would complicate the test. test.m.dropBundleUpdated() // drop bundle update message produce by initialization - go test.m.NotifyOnBundleUpdate(ctx) + go test.m.ProcessBundleUpdates(ctx) // advance just past the expiration time of the first and prune. nothing // should change. 
test.setTimeAndPrune(firstExpiresTime.Add(time.Minute)) - test.requireBundleRootCAs(ctx, t, firstX509CA.Certificate, secondX509CA.Certificate) + test.requireIntermediateRootCA(ctx, t, firstX509CA.Certificate, secondX509CA.Certificate) test.requireBundleJWTKeys(ctx, t, firstJWTKey, secondJWTKey) // advance beyond the safety threshold of the first, prune, and assert that // the first has been pruned test.addTimeAndPrune(safetyThresholdBundle) - test.requireBundleRootCAs(ctx, t, secondX509CA.Certificate) + test.requireIntermediateRootCA(ctx, t, secondX509CA.Certificate) test.requireBundleJWTKeys(ctx, t, secondJWTKey) // we should now have a bundle update notification due to the pruning @@ -589,7 +843,7 @@ func TestPruneBundle(t *testing.T) { // changes because we can't prune out the whole bundle. test.clock.Set(secondExpiresTime.Add(time.Minute + safetyThresholdBundle)) require.EqualError(t, test.m.PruneBundle(context.Background()), "unable to prune bundle: rpc error: code = Unknown desc = prune failed: would prune all certificates") - test.requireBundleRootCAs(ctx, t, secondX509CA.Certificate) + test.requireIntermediateRootCA(ctx, t, secondX509CA.Certificate) test.requireBundleJWTKeys(ctx, t, secondJWTKey) } @@ -785,10 +1039,6 @@ func TestActivationThresholdCap(t *testing.T) { } func TestAlternateKeyTypes(t *testing.T) { - upstreamAuthority, _ := fakeupstreamauthority.Load(t, fakeupstreamauthority.Config{ - TrustDomain: testTrustDomain, - }) - expectRSA := func(t *testing.T, signer crypto.Signer, keySize int) { publicKey, ok := signer.Public().(*rsa.PublicKey) t.Logf("PUBLIC KEY TYPE: %T", signer.Public()) @@ -823,7 +1073,7 @@ func TestAlternateKeyTypes(t *testing.T) { testCases := []struct { name string - upstreamAuthority upstreamauthority.UpstreamAuthority + upstreamAuthority bool x509CAKeyType keymanager.KeyType jwtKeyType keymanager.KeyType checkX509CA func(*testing.T, crypto.Signer) @@ -866,7 +1116,7 @@ func TestAlternateKeyTypes(t *testing.T) { }, { name: 
"upstream-signed with RSA 2048", - upstreamAuthority: upstreamAuthority, + upstreamAuthority: true, x509CAKeyType: keymanager.RSA2048, jwtKeyType: keymanager.RSA2048, checkX509CA: expectRSA2048, @@ -874,7 +1124,7 @@ func TestAlternateKeyTypes(t *testing.T) { }, { name: "upstream-signed with RSA 4096", - upstreamAuthority: upstreamAuthority, + upstreamAuthority: true, x509CAKeyType: keymanager.RSA4096, jwtKeyType: keymanager.RSA4096, checkX509CA: expectRSA4096, @@ -882,7 +1132,7 @@ func TestAlternateKeyTypes(t *testing.T) { }, { name: "upstream-signed with EC P256", - upstreamAuthority: upstreamAuthority, + upstreamAuthority: true, x509CAKeyType: keymanager.ECP256, jwtKeyType: keymanager.ECP256, checkX509CA: expectEC256, @@ -890,7 +1140,7 @@ func TestAlternateKeyTypes(t *testing.T) { }, { name: "upstream-signed with EC P384", - upstreamAuthority: upstreamAuthority, + upstreamAuthority: true, x509CAKeyType: keymanager.ECP384, jwtKeyType: keymanager.ECP384, checkX509CA: expectEC384, @@ -904,6 +1154,7 @@ func TestAlternateKeyTypes(t *testing.T) { ctx := context.Background() test := setupTest(t) + c := test.selfSignedConfig() c.X509CAKeyType = testCase.x509CAKeyType c.JWTKeyType = testCase.jwtKeyType @@ -913,7 +1164,12 @@ func TestAlternateKeyTypes(t *testing.T) { test.cat.SetKeyManager(fakeserverkeymanager.New(t)) // Optionally provide an upstream authority - test.cat.SetUpstreamAuthority(testCase.upstreamAuthority) + if testCase.upstreamAuthority { + upstreamAuthority, _ := test.newFakeUpstreamAuthority(t, fakeupstreamauthority.Config{ + TrustDomain: testTrustDomain, + }) + test.cat.SetUpstreamAuthority(upstreamAuthority) + } manager, err := NewManager(ctx, c) require.NoError(t, err) @@ -965,7 +1221,9 @@ type managerTest struct { func setupTest(t *testing.T) *managerTest { clock := clock.NewMock(t) - ca := new(fakeCA) + ca := &fakeCA{ + taintedAuthoritiesCh: make(chan []*x509.Certificate, 1), + } log, logHook := test.NewNullLogger() metrics := fakemetrics.New() @@ 
-992,6 +1250,11 @@ func setupTest(t *testing.T) *managerTest { } } +func (m *managerTest) newFakeUpstreamAuthority(t *testing.T, config fakeupstreamauthority.Config) (upstreamauthority.UpstreamAuthority, *fakeupstreamauthority.UpstreamAuthority) { + config.Clock = m.clock + return fakeupstreamauthority.Load(t, config) +} + func (m *managerTest) initSelfSignedManager() { m.cat.SetUpstreamAuthority(nil) manager, err := NewManager(context.Background(), m.selfSignedConfig()) @@ -1110,7 +1373,7 @@ func (m *managerTest) getSignerInfo(signer crypto.Signer) signerInfo { } } -func (m *managerTest) requireBundleRootCAs(ctx context.Context, t *testing.T, rootCAs ...*x509.Certificate) { +func (m *managerTest) requireIntermediateRootCA(ctx context.Context, t *testing.T, rootCAs ...*x509.Certificate) { expected := &common.Bundle{} for _, rootCA := range rootCAs { expected.RootCas = append(expected.RootCas, &common.Certificate{ @@ -1124,6 +1387,21 @@ func (m *managerTest) requireBundleRootCAs(ctx context.Context, t *testing.T, ro }) } +func (m *managerTest) requireBundleRootCAs(ctx context.Context, t *testing.T, rootCAs ...*x509certificate.X509Authority) { + expected := &common.Bundle{} + for _, rootCA := range rootCAs { + expected.RootCas = append(expected.RootCas, &common.Certificate{ + DerBytes: rootCA.Certificate.Raw, + TaintedKey: rootCA.Tainted, + }) + } + + bundle := m.fetchBundle(ctx) + spiretest.RequireProtoEqual(t, expected, &common.Bundle{ + RootCas: bundle.RootCas, + }) +} + func (m *managerTest) requireBundleJWTKeys(ctx context.Context, t *testing.T, jwtKeys ...*ca.JWTKey) { expected := &common.Bundle{} for _, jwtKey := range jwtKeys { @@ -1247,6 +1525,8 @@ type fakeCA struct { mu sync.Mutex x509CA *ca.X509CA jwtKey *ca.JWTKey + + taintedAuthoritiesCh chan []*x509.Certificate } func (s *fakeCA) X509CA() *ca.X509CA { @@ -1272,3 +1552,7 @@ func (s *fakeCA) SetJWTKey(jwtKey *ca.JWTKey) { defer s.mu.Unlock() s.jwtKey = jwtKey } + +func (s *fakeCA) 
NotifyTaintedX509Authorities(taintedAuthorities []*x509.Certificate) { + s.taintedAuthoritiesCh <- taintedAuthorities +} diff --git a/pkg/server/ca/manager/slot.go b/pkg/server/ca/manager/slot.go index a7725546319..d47a0c9b393 100644 --- a/pkg/server/ca/manager/slot.go +++ b/pkg/server/ca/manager/slot.go @@ -40,7 +40,9 @@ type Slot interface { ShouldPrepareNext(now time.Time) bool ShouldActivateNext(now time.Time) bool Status() journal.Status + UpstreamAuthorityID() string AuthorityID() string + // TODO: This will be removed as part of #5390 PublicKey() crypto.PublicKey NotAfter() time.Time } @@ -321,19 +323,21 @@ func (s *SlotLoader) tryLoadX509CASlotFromEntry(ctx context.Context, entry *jour slot, badReason, err := s.loadX509CASlotFromEntry(ctx, entry) if err != nil { s.Log.WithError(err).WithFields(logrus.Fields{ - telemetry.Slot: entry.SlotId, - telemetry.IssuedAt: time.Unix(entry.IssuedAt, 0), - telemetry.Status: entry.Status, - telemetry.LocalAuthorityID: entry.AuthorityId, + telemetry.Slot: entry.SlotId, + telemetry.IssuedAt: time.Unix(entry.IssuedAt, 0), + telemetry.Status: entry.Status, + telemetry.LocalAuthorityID: entry.AuthorityId, + telemetry.UpstreamAuthorityID: entry.UpstreamAuthorityId, }).Error("X509CA slot failed to load") return nil, err } if badReason != "" { s.Log.WithError(errors.New(badReason)).WithFields(logrus.Fields{ - telemetry.Slot: entry.SlotId, - telemetry.IssuedAt: time.Unix(entry.IssuedAt, 0), - telemetry.Status: entry.Status, - telemetry.LocalAuthorityID: entry.AuthorityId, + telemetry.Slot: entry.SlotId, + telemetry.IssuedAt: time.Unix(entry.IssuedAt, 0), + telemetry.Status: entry.Status, + telemetry.LocalAuthorityID: entry.AuthorityId, + telemetry.UpstreamAuthorityID: entry.UpstreamAuthorityId, }).Warn("X509CA slot unusable") return nil, nil } @@ -379,10 +383,11 @@ func (s *SlotLoader) loadX509CASlotFromEntry(ctx context.Context, entry *journal Certificate: cert, UpstreamChain: upstreamChain, }, - status: entry.Status, - 
authorityID: entry.AuthorityId, - publicKey: signer.Public(), - notAfter: cert.NotAfter, + status: entry.Status, + authorityID: entry.AuthorityId, + upstreamAuthorityID: entry.UpstreamAuthorityId, + publicKey: signer.Public(), + notAfter: cert.NotAfter, }, "", nil } @@ -524,13 +529,14 @@ func keyActivationThreshold(issuedAt, notAfter time.Time) time.Time { } type x509CASlot struct { - id string - issuedAt time.Time - x509CA *ca.X509CA - status journal.Status - authorityID string - publicKey crypto.PublicKey - notAfter time.Time + id string + issuedAt time.Time + x509CA *ca.X509CA + status journal.Status + authorityID string + publicKey crypto.PublicKey + notAfter time.Time + upstreamAuthorityID string } func newX509CASlot(id string) *x509CASlot { @@ -539,6 +545,10 @@ func newX509CASlot(id string) *x509CASlot { } } +func (s *x509CASlot) UpstreamAuthorityID() string { + return s.upstreamAuthorityID +} + func (s *x509CASlot) KmKeyID() string { return x509CAKmKeyID(s.id) } @@ -603,6 +613,10 @@ func (s *jwtKeySlot) AuthorityID() string { return s.authorityID } +func (s *jwtKeySlot) UpstreamAuthorityID() string { + return "" +} + func (s *jwtKeySlot) PublicKey() crypto.PublicKey { if s.jwtKey == nil { return nil diff --git a/pkg/server/ca/manager/slot_test.go b/pkg/server/ca/manager/slot_test.go index d7864fecf7a..4f8eb784d1d 100644 --- a/pkg/server/ca/manager/slot_test.go +++ b/pkg/server/ca/manager/slot_test.go @@ -700,11 +700,12 @@ func TestJournalLoad(t *testing.T) { entries: &journal.Entries{ X509CAs: []*journal.X509CAEntry{ { - SlotId: "A", - IssuedAt: firstIssuedAtUnix, - Certificate: []byte("foo"), - Status: journal.Status_ACTIVE, - AuthorityId: "1", + SlotId: "A", + IssuedAt: firstIssuedAtUnix, + Certificate: []byte("foo"), + Status: journal.Status_ACTIVE, + AuthorityId: "1", + UpstreamAuthorityId: "2", }, }, }, @@ -722,11 +723,12 @@ func TestJournalLoad(t *testing.T) { Level: logrus.ErrorLevel, Message: "X509CA slot failed to load", Data: logrus.Fields{ - 
logrus.ErrorKey: "unable to parse CA certificate: x509: malformed certificate", - telemetry.IssuedAt: firstIssuedAt.String(), - telemetry.Slot: "A", - telemetry.Status: "ACTIVE", - telemetry.LocalAuthorityID: "1", + logrus.ErrorKey: "unable to parse CA certificate: x509: malformed certificate", + telemetry.IssuedAt: firstIssuedAt.String(), + telemetry.Slot: "A", + telemetry.Status: "ACTIVE", + telemetry.LocalAuthorityID: "1", + telemetry.UpstreamAuthorityID: "2", }, }, }, diff --git a/pkg/server/ca/rotator/rotator.go b/pkg/server/ca/rotator/rotator.go index 17f4ef190d0..923a020ca7c 100644 --- a/pkg/server/ca/rotator/rotator.go +++ b/pkg/server/ca/rotator/rotator.go @@ -22,7 +22,7 @@ const ( type CAManager interface { NotifyBundleLoaded(ctx context.Context) error - NotifyOnBundleUpdate(ctx context.Context) + ProcessBundleUpdates(ctx context.Context) GetCurrentX509CASlot() manager.Slot GetNextX509CASlot() manager.Slot @@ -91,7 +91,7 @@ func (r *Rotator) Run(ctx context.Context) error { func(ctx context.Context) error { // notifyOnBundleUpdate does not fail but rather logs any errors // encountered while notifying - r.c.Manager.NotifyOnBundleUpdate(ctx) + r.c.Manager.ProcessBundleUpdates(ctx) return nil }, ) diff --git a/pkg/server/ca/rotator/rotator_test.go b/pkg/server/ca/rotator/rotator_test.go index b8fb1bb6d89..6361ee810d3 100644 --- a/pkg/server/ca/rotator/rotator_test.go +++ b/pkg/server/ca/rotator/rotator_test.go @@ -413,7 +413,7 @@ func (f *fakeCAManager) NotifyBundleLoaded(context.Context) error { return nil } -func (f *fakeCAManager) NotifyOnBundleUpdate(context.Context) { +func (f *fakeCAManager) ProcessBundleUpdates(context.Context) { } func (f *fakeCAManager) GetCurrentX509CASlot() manager.Slot { diff --git a/pkg/server/ca/upstream_client.go b/pkg/server/ca/upstream_client.go index 125f4dfcfb9..c4bd64f6319 100644 --- a/pkg/server/ca/upstream_client.go +++ b/pkg/server/ca/upstream_client.go @@ -8,6 +8,7 @@ import ( "sync" "time" + 
"github.com/spiffe/spire/pkg/common/coretypes/x509certificate" "github.com/spiffe/spire/pkg/server/plugin/upstreamauthority" "github.com/spiffe/spire/proto/spire/common" "google.golang.org/grpc/codes" @@ -17,7 +18,7 @@ import ( // BundleUpdater is the interface used by the UpstreamClient to append bundle // updates. type BundleUpdater interface { - AppendX509Roots(ctx context.Context, roots []*x509.Certificate) error + SyncX509Roots(ctx context.Context, roots []*x509certificate.X509Authority) error AppendJWTKeys(ctx context.Context, keys []*common.PublicKey) ([]*common.PublicKey, error) LogError(err error, msg string) } @@ -139,16 +140,22 @@ func (u *UpstreamClient) runMintX509CAStream(ctx context.Context, csr []byte, tt } defer x509RootsStream.Close() + // Extract all root certificates + var x509RootCerts []*x509.Certificate + for _, eachRoot := range x509Roots { + x509RootCerts = append(x509RootCerts, eachRoot.Certificate) + } + // Before we append the roots and return the response, we must first // validate that the minted intermediate can sign a valid, conformant // X509-SVID chain of trust using the provided callback. 
- if err := validateX509CA(x509CA, x509Roots); err != nil { + if err := validateX509CA(x509CA, x509RootCerts); err != nil { err = status.Errorf(codes.InvalidArgument, "X509 CA minted by upstream authority is invalid: %v", err) firstResultCh <- mintX509CAResult{err: err} return } - if err := u.c.BundleUpdater.AppendX509Roots(ctx, x509Roots); err != nil { + if err := u.c.BundleUpdater.SyncX509Roots(ctx, x509Roots); err != nil { firstResultCh <- mintX509CAResult{err: err} return } @@ -171,7 +178,7 @@ func (u *UpstreamClient) runMintX509CAStream(ctx context.Context, csr []byte, tt return } - if err := u.c.BundleUpdater.AppendX509Roots(ctx, x509Roots); err != nil { + if err := u.c.BundleUpdater.SyncX509Roots(ctx, x509Roots); err != nil { u.c.BundleUpdater.LogError(err, "Failed to store X.509 roots received by the upstream authority plugin.") continue } diff --git a/pkg/server/ca/upstream_client_test.go b/pkg/server/ca/upstream_client_test.go index ee4e75a97a5..4921674c572 100644 --- a/pkg/server/ca/upstream_client_test.go +++ b/pkg/server/ca/upstream_client_test.go @@ -12,6 +12,7 @@ import ( "github.com/spiffe/go-spiffe/v2/spiffeid" upstreamauthorityv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/server/upstreamauthority/v1" plugintypes "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/types" + "github.com/spiffe/spire/pkg/common/coretypes/x509certificate" "github.com/spiffe/spire/pkg/server/ca" "github.com/spiffe/spire/pkg/server/credtemplate" "github.com/spiffe/spire/proto/spire/common" @@ -172,20 +173,20 @@ type bundleUpdateErr struct { } type fakeBundleUpdater struct { - x509RootsCh chan []*x509.Certificate + x509RootsCh chan []*x509certificate.X509Authority jwtKeysCh chan []*common.PublicKey errorCh chan bundleUpdateErr } func newFakeBundleUpdater() *fakeBundleUpdater { return &fakeBundleUpdater{ - x509RootsCh: make(chan []*x509.Certificate, 1), + x509RootsCh: make(chan []*x509certificate.X509Authority, 1), jwtKeysCh: make(chan []*common.PublicKey, 
1), errorCh: make(chan bundleUpdateErr, 1), } } -func (u *fakeBundleUpdater) AppendX509Roots(ctx context.Context, x509Roots []*x509.Certificate) error { +func (u *fakeBundleUpdater) SyncX509Roots(ctx context.Context, x509Roots []*x509certificate.X509Authority) error { select { case u.x509RootsCh <- x509Roots: return nil @@ -194,7 +195,7 @@ func (u *fakeBundleUpdater) AppendX509Roots(ctx context.Context, x509Roots []*x5 } } -func (u *fakeBundleUpdater) WaitForAppendedX509Roots(t *testing.T) []*x509.Certificate { +func (u *fakeBundleUpdater) WaitForAppendedX509Roots(t *testing.T) []*x509certificate.X509Authority { select { case <-time.After(time.Minute): require.FailNow(t, "timed out waiting for X.509 roots to be appended") diff --git a/pkg/server/cache/dscache/cache.go b/pkg/server/cache/dscache/cache.go index 5f87a499c69..24fd139247b 100644 --- a/pkg/server/cache/dscache/cache.go +++ b/pkg/server/cache/dscache/cache.go @@ -2,7 +2,6 @@ package dscache import ( "context" - "crypto" "sync" "time" @@ -104,15 +103,15 @@ func (ds *DatastoreCache) SetBundle(ctx context.Context, b *common.Bundle) (bund return } -func (ds *DatastoreCache) TaintX509CA(ctx context.Context, trustDomainID string, publicKeyToTaint crypto.PublicKey) (err error) { - if err = ds.DataStore.TaintX509CA(ctx, trustDomainID, publicKeyToTaint); err == nil { +func (ds *DatastoreCache) TaintX509CA(ctx context.Context, trustDomainID string, subjectKeyIDToTaint string) (err error) { + if err = ds.DataStore.TaintX509CA(ctx, trustDomainID, subjectKeyIDToTaint); err == nil { ds.invalidateBundleEntry(trustDomainID) } return } -func (ds *DatastoreCache) RevokeX509CA(ctx context.Context, trustDomainID string, publicKeyToRevoke crypto.PublicKey) (err error) { - if err = ds.DataStore.RevokeX509CA(ctx, trustDomainID, publicKeyToRevoke); err == nil { +func (ds *DatastoreCache) RevokeX509CA(ctx context.Context, trustDomainID string, subjectKeyIDToRevoke string) (err error) { + if err = ds.DataStore.RevokeX509CA(ctx, 
trustDomainID, subjectKeyIDToRevoke); err == nil { ds.invalidateBundleEntry(trustDomainID) } return diff --git a/pkg/server/catalog/nodeattestor.go b/pkg/server/catalog/nodeattestor.go index 1340d5b3257..fe743889dcb 100644 --- a/pkg/server/catalog/nodeattestor.go +++ b/pkg/server/catalog/nodeattestor.go @@ -6,6 +6,7 @@ import ( "github.com/spiffe/spire/pkg/server/plugin/nodeattestor/awsiid" "github.com/spiffe/spire/pkg/server/plugin/nodeattestor/azuremsi" "github.com/spiffe/spire/pkg/server/plugin/nodeattestor/gcpiit" + "github.com/spiffe/spire/pkg/server/plugin/nodeattestor/httpchallenge" "github.com/spiffe/spire/pkg/server/plugin/nodeattestor/jointoken" "github.com/spiffe/spire/pkg/server/plugin/nodeattestor/k8spsat" "github.com/spiffe/spire/pkg/server/plugin/nodeattestor/k8ssat" @@ -37,6 +38,7 @@ func (repo *nodeAttestorRepository) BuiltIns() []catalog.BuiltIn { awsiid.BuiltIn(), azuremsi.BuiltIn(), gcpiit.BuiltIn(), + httpchallenge.BuiltIn(), jointoken.BuiltIn(), k8spsat.BuiltIn(), k8ssat.BuiltIn(), diff --git a/pkg/server/catalog/upstreamauthority.go b/pkg/server/catalog/upstreamauthority.go index aaf4621b78f..2f77a78384a 100644 --- a/pkg/server/catalog/upstreamauthority.go +++ b/pkg/server/catalog/upstreamauthority.go @@ -7,6 +7,7 @@ import ( "github.com/spiffe/spire/pkg/server/plugin/upstreamauthority/awssecret" "github.com/spiffe/spire/pkg/server/plugin/upstreamauthority/certmanager" "github.com/spiffe/spire/pkg/server/plugin/upstreamauthority/disk" + "github.com/spiffe/spire/pkg/server/plugin/upstreamauthority/ejbca" "github.com/spiffe/spire/pkg/server/plugin/upstreamauthority/gcpcas" spireplugin "github.com/spiffe/spire/pkg/server/plugin/upstreamauthority/spire" "github.com/spiffe/spire/pkg/server/plugin/upstreamauthority/vault" @@ -39,6 +40,7 @@ func (repo *upstreamAuthorityRepository) BuiltIns() []catalog.BuiltIn { spireplugin.BuiltIn(), disk.BuiltIn(), certmanager.BuiltIn(), + ejbca.BuiltIn(), } } diff --git a/pkg/server/datastore/datastore.go 
b/pkg/server/datastore/datastore.go index 8b6f2aa0ef8..6cc3cfca5a9 100644 --- a/pkg/server/datastore/datastore.go +++ b/pkg/server/datastore/datastore.go @@ -2,7 +2,6 @@ package datastore import ( "context" - "crypto" "net/url" "time" @@ -25,8 +24,8 @@ type DataStore interface { UpdateBundle(context.Context, *common.Bundle, *common.BundleMask) (*common.Bundle, error) // Keys - TaintX509CA(ctx context.Context, trustDomainID string, publicKeyToTaint crypto.PublicKey) error - RevokeX509CA(ctx context.Context, trustDomainID string, publicKeyToRevoke crypto.PublicKey) error + TaintX509CA(ctx context.Context, trustDomainID string, subjectKeyIDToTaint string) error + RevokeX509CA(ctx context.Context, trustDomainID string, subjectKeyIDToRevoke string) error TaintJWTKey(ctx context.Context, trustDomainID string, authorityID string) (*common.PublicKey, error) RevokeJWTKey(ctx context.Context, trustDomainID string, authorityID string) (*common.PublicKey, error) diff --git a/pkg/server/datastore/sqlstore/migration.go b/pkg/server/datastore/sqlstore/migration.go index c1533227130..058269ffb8b 100644 --- a/pkg/server/datastore/sqlstore/migration.go +++ b/pkg/server/datastore/sqlstore/migration.go @@ -238,7 +238,14 @@ import ( // | v1.9.6 | | | // |*********|********|***************************************************************************| // | v1.10.0 | | | +// |---------| | | // | v1.10.1 | | | +// |---------| | | +// | v1.10.2 | | | +// |---------| | | +// | v1.10.3 | | | +// |---------| | | +// | v1.10.4 | | | // ================================================================================================ const ( @@ -248,7 +255,7 @@ const ( // lastMinorReleaseSchemaVersion is the schema version supported by the // last minor release. When the migrations are opportunistically pruned // from the code after a minor release, this number should be updated. 
- lastMinorReleaseSchemaVersion = 21 + lastMinorReleaseSchemaVersion = 23 ) // the current code version @@ -460,13 +467,23 @@ func migrateVersion(tx *gorm.DB, currVersion int, log logrus.FieldLogger) (versi // Place all migrations handled by the current minor release here. This // list can be opportunistically pruned after every minor release but won't // break things if it isn't. - switch currVersion { - case 21: - // TODO: remove this migration in 1.9.0 - err = migrateToV22(tx) - case 22: - // TODO: remove this migration in 1.9.0 - err = migrateToV23(tx) + // + // When adding a supported migration to version XX, add a case and the + // corresponding function. The case in the following switch statement will + // look like this: + // + // case XX: + // err = migrateToVXX(tx) + // + // And the migrateToVXX function will be like this: + // func migrateToVXX(tx *gorm.DB) error { + // if err := tx.AutoMigrate(&Foo{}, &Bar{}).Error; err != nil { + // return sqlError.Wrap(err) + // } + // return nil + // } + // + switch currVersion { //nolint: gocritic // No upgrade required yet, keeping switch for future additions default: err = sqlError.New("no migration support for unknown schema version %d", currVersion) } @@ -477,20 +494,6 @@ func migrateVersion(tx *gorm.DB, currVersion int, log logrus.FieldLogger) (versi return nextVersion, nil } -func migrateToV22(tx *gorm.DB) error { - if err := tx.AutoMigrate(&RegisteredEntryEvent{}, &AttestedNodeEvent{}).Error; err != nil { - return sqlError.Wrap(err) - } - return nil -} - -func migrateToV23(tx *gorm.DB) error { - if err := tx.AutoMigrate(&CAJournal{}).Error; err != nil { - return sqlError.Wrap(err) - } - return nil -} - func addFederatedRegistrationEntriesRegisteredEntryIDIndex(tx *gorm.DB) error { // GORM creates the federated_registration_entries implicitly with a primary // key tuple (bundle_id, registered_entry_id). 
Unfortunately, MySQL5 does diff --git a/pkg/server/datastore/sqlstore/migration_test.go b/pkg/server/datastore/sqlstore/migration_test.go index 9ee21c6f476..2f11bade86c 100644 --- a/pkg/server/datastore/sqlstore/migration_test.go +++ b/pkg/server/datastore/sqlstore/migration_test.go @@ -17,78 +17,6 @@ var ( // pristine database created by a SPIRE release that runs that schema // version. migrationDumps = map[int]string{ - 21: ` - PRAGMA foreign_keys=OFF; - BEGIN TRANSACTION; - CREATE TABLE IF NOT EXISTS "federated_registration_entries" ("bundle_id" integer,"registered_entry_id" integer, PRIMARY KEY ("bundle_id","registered_entry_id")); - CREATE TABLE IF NOT EXISTS "bundles" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"trust_domain" varchar(255) NOT NULL,"data" blob ); - INSERT INTO bundles VALUES(1,'2022-06-17 19:03:03.009646389+00:00','2022-06-17 19:58:07.693138279+00:00','spiffe://test.bloomberg.com',X'0a1b7370696666653a2f2f746573742e626c6f6f6d626572672e636f6d12ac030aa903308201a53082014aa00302010202101dbec4c288d719c3b1e4c1eec6b0ff07300a06082a8648ce3d040302301e310b3009060355040613025553310f300d060355040a1306535049464645301e170d3232303631373139303235335a170d3232303631373139303930335a301e310b3009060355040613025553310f300d060355040a13065350494646453059301306072a8648ce3d020106082a8648ce3d0301070342000463d466afb748ca43e17bc48c60df703c61544d37ee3db2c9198f6b95e3ae03bb60ebf2d9fcecc1c571ce3a2073ef6437f13fdb58221bc912a5a3826bb7f1236da36a3068300e0603551d0f0101ff040403020186300f0603551d130101ff040530030101ff301d0603551d0e041604147dd4d080dfa6b6a702ec678c3a70664f7d0e2bbd30260603551d11041f301d861b7370696666653a2f2f746573742e626c6f6f6d626572672e636f6d300a06082a8648ce3d0403020349003046022100adb7b80596f7539b49c58c612519baf6dbc91740d55d917b4b28be9b1a10ec74022100cb4098315d0f29f28bbd1e975dcc74dc4cd129a308fba0950b68ce757f7666ee12ac030aa903308201a53082014aa00302010202100fcbc5319eb905653dfb9495655bb57c300a06082a8648ce3d040302301e310b3009060355040613
025553310f300d060355040a1306535049464645301e170d3232303631373139303630315a170d3232303631373139313231315a301e310b3009060355040613025553310f300d060355040a13065350494646453059301306072a8648ce3d020106082a8648ce3d030107034200049c4213df3d4ececdbd1651d3a7eafdb062cea691fdbfa114af8a66f83385a9e08b9b0a8893ff7b6b234e2ed14d19b3f0912b3535f109abbf5945f9424b8355d5a36a3068300e0603551d0f0101ff040403020106300f0603551d130101ff040530030101ff301d0603551d0e04160414481208308831170cf0b56126554b4ae6619343c830260603551d11041f301d861b7370696666653a2f2f746573742e626c6f6f6d626572672e636f6d300a06082a8648ce3d0403020349003046022100b5b2677fcc3f799aaac63bc22d03e41ac9502354f3e79bc7332b26d2ab9df24602210090aa4afa1cd0e5f1abd9d39aca2515e3d9c5421b192066bd76ec4a589e952f5712aa030aa703308201a33082014aa0030201020210530d057ad2bbb05a01816c7838fa85be300a06082a8648ce3d040302301e310b3009060355040613025553310f300d060355040a1306535049464645301e170d3232303631373139303930325a170d3232303631373139313531325a301e310b3009060355040613025553310f300d060355040a13065350494646453059301306072a8648ce3d020106082a8648ce3d03010703420004c26e10c947bb87c3061793a9438a43a5b9e674fca49b94b561a8e4fd9e15d62e7b7144a3e4f7c8f78f794b39e44760b3c6c006cbf767be3aa7294b5822fcf7b5a36a3068300e0603551d0f0101ff040403020106300f0603551d130101ff040530030101ff301d0603551d0e04160414d8abb8207f9152640cb0a5744b7bc8c5d7e2264730260603551d11041f301d861b7370696666653a2f2f746573742e626c6f6f6d626572672e636f6d300a06082a8648ce3d04030203470030440220724460ef6272e33fd91bffca6c3855afa54781c4d32280d23a17c469480c40ab0220055303a13b35f08743ad1b67745ffd9c56e611fda7dcef6b3e9f2dce59ca590f12ab030aa803308201a43082014ba0030201020211008ce3ff7d3b9dfe8e4feba790282c0e1a300a06082a8648ce3d040302301e310b3009060355040613025553310f300d060355040a1306535049464645301e170d3232303631373139313231325a170d3232303631373139313832325a301e310b3009060355040613025553310f300d060355040a13065350494646453059301306072a8648ce3d020106082a8648ce3d03010703420004d1808631f0caffc0d25c4d8a6e7c1a110487e2ffd2ecf28e66663263
f490d7503cd3039b6047655c98206f4697cd19ef03a6230e506555c320ab72b119a4105fa36a3068300e0603551d0f0101ff040403020106300f0603551d130101ff040530030101ff301d0603551d0e0416041449d69ba2b790245ec9d1843510b38c0c78598afa30260603551d11041f301d861b7370696666653a2f2f746573742e626c6f6f6d626572672e636f6d300a06082a8648ce3d040302034700304402205a733e62b071d94e6938dc4b4e4171996137bcd4a753a819f54c76f06da4961e022003de02a47780f307a452722800d16e579b15f04517732b205a6d4220d1b5e23412ad030aaa03308201a63082014ba003020102021100c02589802a8ded21d33235733b8a1e99300a06082a8648ce3d040302301e310b3009060355040613025553310f300d060355040a1306535049464645301e170d3232303631373139313532315a170d3232303631373139323133315a301e310b3009060355040613025553310f300d060355040a13065350494646453059301306072a8648ce3d020106082a8648ce3d0301070342000483902bbdd8a6cd4a571e1a8c1784a050e214f1c9ae8db313496412cef6fb85a5df0d7e2949d1b1501bce8b6d2c8d6016e1982fb31def84bfab8325baca92ca7ea36a3068300e0603551d0f0101ff040403020106300f0603551d130101ff040530030101ff301d0603551d0e04160414b4320070ec91faacf8e59887f2a5a839bd86741a30260603551d11041f301d861b7370696666653a2f2f746573742e626c6f6f6d626572672e636f6d300a06082a8648ce3d04030203490030460221009b4cf53f8e1eab14c39625bb6a2a68e30029808fe0e28efa0e4d81627b28816e022100a5b975c7902a26a9aa2251d0286f346e291bcd33c7f2aa1a53eeb1f8571d066a12ac030aa903308201a53082014ba003020102021100f921e3ce510fe7865f18bab76c332221300a06082a8648ce3d040302301e310b3009060355040613025553310f300d060355040a1306535049464645301e170d3232303631373139323635375a170d3232303631373139333330375a301e310b3009060355040613025553310f300d060355040a13065350494646453059301306072a8648ce3d020106082a8648ce3d03010703420004fc9060c9c42a9890c0e77c2160fad90491eb2b72a7fbb9e4178ba36bb2659ec60996135f855fa447a4ddb5c049f8a7c41dd1b21889ccdada31558d2e0f9509d9a36a3068300e0603551d0f0101ff040403020106300f0603551d130101ff040530030101ff301d0603551d0e04160414062be283d174a4cf600cfb141bda849bbcdf8a3b30260603551d11041f301d861b7370696666653a2f2f746573742e626c6f6f6d6265
72672e636f6d300a06082a8648ce3d0403020348003045022100deb384211ed707d6586406fd11d6339ba69d650ccc5780758547ed394dbab24a02202df262fb29d7bdba7ea68f59847cd7562aaf937d075e3bc63a961ce2914487d412ab030aa803308201a43082014aa003020102021070f3ce762335b82ecb6131963f3fef02300a06082a8648ce3d040302301e310b3009060355040613025553310f300d060355040a1306535049464645301e170d3232303631373139333030365a170d3232303631373139333631365a301e310b3009060355040613025553310f300d060355040a13065350494646453059301306072a8648ce3d020106082a8648ce3d03010703420004accdb39e3519326f7675ca3f40b4eebd697650bc13ccc18a661915a75809bba841028dbca7399a4776f908ae710d620a16df450a0287b5a2d5ab6bc5b508ce00a36a3068300e0603551d0f0101ff040403020106300f0603551d130101ff040530030101ff301d0603551d0e04160414914f8fc7aeb504c95b918b17730aab0074f92cc630260603551d11041f301d861b7370696666653a2f2f746573742e626c6f6f6d626572672e636f6d300a06082a8648ce3d0403020348003045022100dd37ef7953b808e5f797a1f51cd18de0bf53714b35e0419ab9e9e2a6ddfd4b2a02203dc345e25274608d6c3a61d063016bde9f5fd1ed4734550b562beb34aa1590e812aa030aa703308201a33082014aa003020102021040370380fc498b6750c034d3bef106ce300a06082a8648ce3d040302301e310b3009060355040613025553310f300d060355040a1306535049464645301e170d3232303631373139333330365a170d3232303631373139333931365a301e310b3009060355040613025553310f300d060355040a13065350494646453059301306072a8648ce3d020106082a8648ce3d03010703420004ba53192a0199f27a5c870ac6e3799ccd1b80c9ea559d943bb5ea60f74f68dd12911416bd8f359d92a81fe79031e006fed3d20d9bcd64859bf33c666c136412f3a36a3068300e0603551d0f0101ff040403020106300f0603551d130101ff040530030101ff301d0603551d0e041604144c2039bd70c9e40026ef875b4d8d813d36b33bcd30260603551d11041f301d861b7370696666653a2f2f746573742e626c6f6f6d626572672e636f6d300a06082a8648ce3d0403020347003044022017f0c5904844069f307ce3b09ba741974c2999b769ff4cb6708b3085e604bdf5022024eabd358e255176e89ef66f0803d6a10967b01f64761f257535f2895ebdfac412ab030aa803308201a43082014aa003020102021034777ea2c3a639f1d949f045b2cc8037300a06082a8648ce3d040302
301e310b3009060355040613025553310f300d060355040a1306535049464645301e170d3232303631373139333630365a170d3232303631373139343231365a301e310b3009060355040613025553310f300d060355040a13065350494646453059301306072a8648ce3d020106082a8648ce3d0301070342000487fe486f685f4dd4d67e89201cfa8ffaa6e63a20f4f7f5f4ef56a3d7bf85f45b2ef72642e6ef65e6b83d9f588838e3f780d4f71d199e1c4e1ca41396ebadff44a36a3068300e0603551d0f0101ff040403020106300f0603551d130101ff040530030101ff301d0603551d0e0416041473c570d4cc2e2c514c7ffd14f51ffe35df5b167730260603551d11041f301d861b7370696666653a2f2f746573742e626c6f6f6d626572672e636f6d300a06082a8648ce3d040302034800304502201dd2c058926d7467ffc82fdfdf30fcb22353997e23a11e3d643a4ec773678235022100fcfa2bbc7321d7ef395af90668617b1df26cc8f0df279087aa436585b16b8c4d12ac030aa903308201a53082014ba0030201020211008882a558c4bf6daffd47e4922e1eee65300a06082a8648ce3d040302301e310b3009060355040613025553310f300d060355040a1306535049464645301e170d3232303631373139333930365a170d3232303631373139343531365a301e310b3009060355040613025553310f300d060355040a13065350494646453059301306072a8648ce3d020106082a8648ce3d03010703420004466a39e286f532a88a28b521133d2283922b4f84eb7e2cfd0e57f6122703c4b436f834d6a03f6d7165eaf7791380606f395f56a0116e0cf35596f9056037a15ea36a3068300e0603551d0f0101ff040403020106300f0603551d130101ff040530030101ff301d0603551d0e041604145e1384e437c6564373a830464ff9c87fefe90aff30260603551d11041f301d861b7370696666653a2f2f746573742e626c6f6f6d626572672e636f6d300a06082a8648ce3d0403020348003045022009d5600c3e7d1ebc3002d745510d9958bfa92c9bd28d50aa670fac2937c1a78c0221009877463d1e34fbf8d29d6018111d996f89a5a0cfc0c4aeb885189b41cd5ba13912aa030aa703308201a33082014aa00302010202106ca146ff27eb8c68148cea38f2b35348300a06082a8648ce3d040302301e310b3009060355040613025553310f300d060355040a1306535049464645301e170d3232303631373139343230365a170d3232303631373139343831365a301e310b3009060355040613025553310f300d060355040a13065350494646453059301306072a8648ce3d020106082a8648ce3d03010703420004c8198488e5b71e4032059d587b5f0005
3b8443997bdeeb24f5051b93079be2cfb6ae0b141861dcfdc2824ecca60a6c4709b13685c5324e0a9d39e7dd988c8f32a36a3068300e0603551d0f0101ff040403020106300f0603551d130101ff040530030101ff301d0603551d0e04160414d54ae88cb867f1408d1f9f1ce6508f417c7e501a30260603551d11041f301d861b7370696666653a2f2f746573742e626c6f6f6d626572672e636f6d300a06082a8648ce3d0403020347003044022056e6148ab3456b65b16a6fcfd250242d94298c858806771310fcc9361b0a5af302204f687005b50dacfb4639ea9e58be29e829019b9fd784b8741b85ee3856fd2b0b12ac030aa903308201a53082014ba003020102021100ede6e41679c5127ba61e7c8e873d36d1300a06082a8648ce3d040302301e310b3009060355040613025553310f300d060355040a1306535049464645301e170d3232303631373139343530365a170d3232303631373139353131365a301e310b3009060355040613025553310f300d060355040a13065350494646453059301306072a8648ce3d020106082a8648ce3d03010703420004783691288c48d54a9d5cc02c0b57fa1c5a8b4a60cd9037e8ee45a5e77075c058830ddc62f5a6c3f27d85cf3972392bdc1bdb9a2d0bd9e63566d305e1db4ee9d7a36a3068300e0603551d0f0101ff040403020106300f0603551d130101ff040530030101ff301d0603551d0e041604140207a872660e36b39b53bb53bdb47f6e5e3d96c730260603551d11041f301d861b7370696666653a2f2f746573742e626c6f6f6d626572672e636f6d300a06082a8648ce3d04030203480030450220056d677e08750138028b82295693bbf6b90b3a2b635a6721e1811240f17f7260022100e56a40b657938765c69a24a57f4e6781edebaa0bf9d66518c6a3c0e7c39b45b512ab030aa803308201a43082014aa003020102021014ffe6d2db14882d9711ffbc4da33bfb300a06082a8648ce3d040302301e310b3009060355040613025553310f300d060355040a1306535049464645301e170d3232303631373139343830365a170d3232303631373139353431365a301e310b3009060355040613025553310f300d060355040a13065350494646453059301306072a8648ce3d020106082a8648ce3d030107034200042c983894bdd014a268d0f41c3a8565dfce7d0997caaaa90ed327fa787ce06594619262ee32099d10fc36eed46146fb5e48784c7b4fe2d4c1d057e2760298bc07a36a3068300e0603551d0f0101ff040403020106300f0603551d130101ff040530030101ff301d0603551d0e041604141f76ab0bc863176ff6ae86b70b3d2b1fe6078b0330260603551d11041f301d861b7370696666653a2f2f7465
73742e626c6f6f6d626572672e636f6d300a06082a8648ce3d040302034800304502204df0f787d1434d7e87a2be669396eaef4bc92c1c14a1152720390cdd12685fee022100fef26cc35eb6f066a5629031b6597a8dc1c9e594e061d07b08310910d1fd799012ab030aa803308201a43082014aa00302010202101a93b7c8613892f615638e41dc451abb300a06082a8648ce3d040302301e310b3009060355040613025553310f300d060355040a1306535049464645301e170d3232303631373139353131365a170d3232303631373139353732365a301e310b3009060355040613025553310f300d060355040a13065350494646453059301306072a8648ce3d020106082a8648ce3d03010703420004caebebddcc0ac5cba37c463cec69460675cc469711084d011a198aa3c176dc8dc381d646372da7db26516bcc80a8b34181705f7af61b0df2afff23b298d34d8aa36a3068300e0603551d0f0101ff040403020106300f0603551d130101ff040530030101ff301d0603551d0e04160414b8b00dfd89275169097f379fdc8dbf0d53a6b0d830260603551d11041f301d861b7370696666653a2f2f746573742e626c6f6f6d626572672e636f6d300a06082a8648ce3d0403020348003045022064ee7573b8d6504aba6350f1be2fc93b0927626fae7dc4fb0a3fc8bffc6af1a6022100d7260176c7407018f7e175b77c93b34a8886849dce6e60e6b1fba851d6a22b0c12ac030aa903308201a53082014ba003020102021100a77b7862dd568b2d16ec26a58e9bab1d300a06082a8648ce3d040302301e310b3009060355040613025553310f300d060355040a1306535049464645301e170d3232303631373139353735375a170d3232303631373230303430375a301e310b3009060355040613025553310f300d060355040a13065350494646453059301306072a8648ce3d020106082a8648ce3d03010703420004d2250d660fb9987fdb11c6ccb3fd4d5894029253bb12808d564028aaf7e2c1b5f624e1b7d1331770e60eba9342e4aa3588d6550e66f7f92c7d2d756b1a26c7e5a36a3068300e0603551d0f0101ff040403020106300f0603551d130101ff040530030101ff301d0603551d0e041604146d7e6694715642ab9da9c42438f22af3a96ae20f30260603551d11041f301d861b7370696666653a2f2f746573742e626c6f6f6d626572672e636f6d300a06082a8648ce3d0403020348003045022100bd8ee3833c9e21becace0356017857d6de80a7b9fd3591f6f45632f9f4dd306802203f2a802a8006537d652e8729d8356206f104679955777bd60bed73948df1ff801a85010a5b3059301306072a8648ce3d020106082a8648ce3d03010703420004ad9db8b77c
db9a8d987ba6bb374d6ff302757b038abbbe97364170a595e087e25c5dd082a5c184c17b1a24df905788c57c997c2ac7b64acc759ccbe40a74efb412206b324d626541386e7842516a4745656d6b74784768716a50454b386856534d5618cfa2b395061a85010a5b3059301306072a8648ce3d020106082a8648ce3d0301070342000422a504324c223867a686eb5a04903f312d1c81c644d5ff02ba80649287e5253020386ee6d5dacd9e2398f29259b5ef51956aa5dd664f340d4b543392c2ecbc1712204d6749487a7178635158424b6b51746d4a7a536b4851374a6b675a72666d556a188ba4b395061a85010a5b3059301306072a8648ce3d020106082a8648ce3d03010703420004594df0d913c3bdf5034e25cde0560e60e73e452e5debd38d2dc9c4aff4fbaed9475a3f873a972c5f153a6fa45c9bb66775c13bf2bb493fe3a30ab4c57c09dd7d12207644626f50355356477275634c4445725a3949416741316b36444b5a656e7a6818c0a5b395061a85010a5b3059301306072a8648ce3d020106082a8648ce3d03010703420004032645c85153ab2b3a47bfe92d946356a74c71a173e2271df488143df18630f509a30442579c6399b3ed4cb6acc3961a28c823c64967b331942790d8dcbe921a1220486b414d723930436b424e4a6d746262524f5953576a456f514c667652304e6418fea6b395061a85010a5b3059301306072a8648ce3d020106082a8648ce3d03010703420004e9275c7180571a4265657cb42aaf6fdcf6ef89b328e02fff513e197734ad7d533185ebc27cd4f09850fb95a7ff001496e9f5e4efe56d3b76d490bd02b9857628122042473370687742507278757534707451667131795574754e303863667a55335818bba8b395061a85010a5b3059301306072a8648ce3d020106082a8648ce3d0301070342000485d08ac889f7499d30c53c220bb76793fd9f3e7bbc487b24772bc46109e4bc578747226078032c8e57e0ea7855aa9502906b368f61ea44a503e5dedc5d14679c1220555a51625170446d3161424b5a39516165666b7246625338635471394173716618f3adb395061a85010a5b3059301306072a8648ce3d020106082a8648ce3d03010703420004ced4a54b22caaaed69fbd15cb139f35b0ed09804a3b97ba8ce91d1e744060ba525a9874a80b32e4bfbcbf1ae0979b23cf2b86050f55cae15cf55207606bf15d412205647397a68384f4153784f78494443496f4e725365373944657664454171526718b0afb395061a85010a5b3059301306072a8648ce3d020106082a8648ce3d030107034200044c2ef4a4ffbd9e62ce32e11cd005e5933d43a6962eaea2a4443de5df71ea1e72235d0f5f52c29a0760d8cfc5095cbaec8473f02d21
72f264c1eda57f331901b61220513264374377616a76366e5a6b664e367258676e6d504c57585970577969794818e4b0b395061a85010a5b3059301306072a8648ce3d020106082a8648ce3d03010703420004f88dc8f97cb1a65a14e73fa96fee48719ed18f5c2ea85c6df48f8abcf9fc455636da7a2fc4642c199da04932595b1a12fd231a11f75e78e6d8ebe95458e6eea4122061466d6e624c6d44625458516465366a7a684a646d5a4d79447341695047797618a2b2b395061a85010a5b3059301306072a8648ce3d020106082a8648ce3d0301070342000470a7d4cb7f0ad669f32d30c99ac990c101ef9bb62af5e74521c17845cb87ac686c3f880a0a00cd784d0e079029092d94ac16579562e22723afb03dae8607587512205343643653756c59614d6a4d613458414b7957656e623967337758464f79327818d6b3b395061a85010a5b3059301306072a8648ce3d020106082a8648ce3d03010703420004dc4f1818d94528551c626b3a24b278ad06d94a613ab43835156dcfa769536e76ca45b758fffea89968b6e3d0316b0be64b8dee0bf7481a560b4136797aeb7b5a12204c4148356d3158384b36693770557948424662457674663543707a49547034611894b5b395061a85010a5b3059301306072a8648ce3d020106082a8648ce3d030107034200048cabb93b4b5708b2ad135d06bb4ddf71630bfa86690f3e1cc20bbda31f727d3bd9bd3208a193225d221c7f600eaef75b646737813a09dc42df8d639de21f8e20122030576579575663755557474c71544c7148454c676f705556676a747352336c5818c8b6b395061a85010a5b3059301306072a8648ce3d020106082a8648ce3d0301070342000408b261f4fc9d49957510866d15c01e8118f614763e7b42ced56cb095e15f67c85ccbe1ada1cecacadeaba2dd315bbe6f1742d95ceae049782cccf681539328d512206d33675263627a7244556a687a6b336c42493731526476524b30357554354c4c18fcb7b395061a85010a5b3059301306072a8648ce3d020106082a8648ce3d03010703420004aae4e1ac654a75de259da99da146cfc5de6778c21641153f166083d5d9a3cc5e09b4e860ad08fa0b1078f302793703897924c875e3498d80f4b62cdb9e544f171220465573666146665037446f4f486b43706830576a63304f35554659684165753718bab9b395061a85010a5b3059301306072a8648ce3d020106082a8648ce3d030107034200046534262ad8cb1025fdb6e8dc962407e87e04a36dd0e0c07ced4d94fa5493026d55cc34666fc1db03698738396ed58e4563feadd5eea449bd5433afae32bf1f6f1220726a334b3470316658506b766476635a444c537066757337503137457830497518b7bc
b39506'); - CREATE TABLE IF NOT EXISTS "attested_node_entries" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"spiffe_id" varchar(255),"data_type" varchar(255),"serial_number" varchar(255),"expires_at" datetime,"new_serial_number" varchar(255),"new_expires_at" datetime , "can_reattest" bool); - CREATE TABLE IF NOT EXISTS "node_resolver_map_entries" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"spiffe_id" varchar(255),"type" varchar(255),"value" varchar(255) ); - CREATE TABLE IF NOT EXISTS "registered_entries" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"entry_id" varchar(255),"spiffe_id" varchar(255),"parent_id" varchar(255),"ttl" integer,"admin" bool,"downstream" bool,"expiry" bigint,"revision_number" bigint,"store_svid" bool , "hint" varchar(255), "jwt_svid_ttl" integer); - CREATE TABLE IF NOT EXISTS "join_tokens" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"token" varchar(255),"expiry" bigint ); - CREATE TABLE IF NOT EXISTS "selectors" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"registered_entry_id" integer,"type" varchar(255),"value" varchar(255) ); - CREATE TABLE IF NOT EXISTS "migrations" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"version" integer,"code_version" varchar(255) ); - INSERT INTO migrations VALUES(1,'2023-06-06 12:16:04.285757-03:00','2023-06-06 12:16:04.285757-03:00',21,'1.7.0-dev-unk'); - CREATE TABLE IF NOT EXISTS "dns_names" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"registered_entry_id" integer,"value" varchar(255) ); - CREATE TABLE IF NOT EXISTS "federated_trust_domains" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"trust_domain" varchar(255) NOT NULL,"bundle_endpoint_url" varchar(255),"bundle_endpoint_profile" 
varchar(255),"endpoint_spiffe_id" varchar(255),"implicit" bool ); - DELETE FROM sqlite_sequence; - INSERT INTO sqlite_sequence VALUES('migrations',1); - INSERT INTO sqlite_sequence VALUES('bundles',1); - CREATE UNIQUE INDEX uix_bundles_trust_domain ON "bundles"(trust_domain) ; - CREATE INDEX idx_attested_node_entries_expires_at ON "attested_node_entries"(expires_at) ; - CREATE UNIQUE INDEX uix_attested_node_entries_spiffe_id ON "attested_node_entries"(spiffe_id) ; - CREATE UNIQUE INDEX idx_node_resolver_map ON "node_resolver_map_entries"(spiffe_id, "type", "value") ; - CREATE INDEX idx_registered_entries_spiffe_id ON "registered_entries"(spiffe_id) ; - CREATE INDEX idx_registered_entries_parent_id ON "registered_entries"(parent_id) ; - CREATE INDEX idx_registered_entries_expiry ON "registered_entries"("expiry") ; - CREATE INDEX idx_registered_entries_hint ON "registered_entries"("hint") ; - CREATE UNIQUE INDEX uix_registered_entries_entry_id ON "registered_entries"(entry_id) ; - CREATE UNIQUE INDEX uix_join_tokens_token ON "join_tokens"("token") ; - CREATE INDEX idx_selectors_type_value ON "selectors"("type", "value") ; - CREATE UNIQUE INDEX idx_selector_entry ON "selectors"(registered_entry_id, "type", "value") ; - CREATE UNIQUE INDEX idx_dns_entry ON "dns_names"(registered_entry_id, "value") ; - CREATE UNIQUE INDEX uix_federated_trust_domains_trust_domain ON "federated_trust_domains"(trust_domain) ; - CREATE INDEX idx_federated_registration_entries_registered_entry_id ON "federated_registration_entries"(registered_entry_id) ; - COMMIT; - `, - 22: ` - PRAGMA foreign_keys=OFF; - BEGIN TRANSACTION; - CREATE TABLE IF NOT EXISTS "federated_registration_entries" ("bundle_id" integer,"registered_entry_id" integer, PRIMARY KEY ("bundle_id","registered_entry_id")); - CREATE TABLE IF NOT EXISTS "bundles" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"trust_domain" varchar(255) NOT NULL,"data" blob ); - INSERT INTO bundles 
VALUES(1,'2023-08-29 13:35:31.53235-03:00','2023-08-29 13:35:31.613672-03:00','spiffe://example.org',X'0a147370696666653a2f2f6578616d706c652e6f726712df030adc03308201d83082015ea0030201020214449db4c88cda977653f4d5e4770aec9b4b1e970c300a06082a8648ce3d040304301e310b3009060355040613025553310f300d060355040a0c06535049464645301e170d3233303531353032303530365a170d3238303531333032303530365a301e310b3009060355040613025553310f300d060355040a0c065350494646453076301006072a8648ce3d020106052b8104002203620004f57073b72f16fdec785ebd117735018227bfa2475a51385e485d0f42f540693b1768fd49ef2bf40e195ac38e48ec2bfd1cfdb51ce98cc48959d177aab0e97db0ce47e7b1c1416bb46c83577f0e2375e1dd079be4d57c8dc81410c5e5294b1867a35d305b301d0603551d0e04160414928ae360c6aaa7cf6aff8d1716b0046aa61c10ff300f0603551d130101ff040530030101ff300e0603551d0f0101ff04040302010630190603551d1104123010860e7370696666653a2f2f6c6f63616c300a06082a8648ce3d0403040368003065023100e7843c85f844778a95c9cc1b2cdcce9bf1d0ae9d67d7e6b6c5cf3c894d37e8530f6a7711d4f2ea82c3833df5b2b6d75102300a2287548b879888c6bdf88dab55b8fc80ec490059f484b2c4177403997b463e9011b3da82f8a6e29254eee45a6293641a85010a5b3059301306072a8648ce3d020106082a8648ce3d030107034200046e2ff496b28acab3401f9e9a1687bf0c0415872a6ae09ac567e846517fbad9557bc5ce4c564227bc305d5cffda6d1a8d8bf167ec8f50e8530d1f0f03a34a794d12207932527a3534416f43494834386b63436342535244524e7a486348796852576b18d3dfbda7062801'); - CREATE TABLE IF NOT EXISTS "attested_node_entries" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"spiffe_id" varchar(255),"data_type" varchar(255),"serial_number" varchar(255),"expires_at" datetime,"new_serial_number" varchar(255),"new_expires_at" datetime,"can_reattest" bool ); - CREATE TABLE IF NOT EXISTS "attested_node_entries_events" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"spiffe_id" varchar(255) ); - CREATE TABLE IF NOT EXISTS "node_resolver_map_entries" ("id" integer primary key autoincrement,"created_at" 
datetime,"updated_at" datetime,"spiffe_id" varchar(255),"type" varchar(255),"value" varchar(255) ); - CREATE TABLE IF NOT EXISTS "registered_entries" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"entry_id" varchar(255),"spiffe_id" varchar(255),"parent_id" varchar(255),"ttl" integer,"admin" bool,"downstream" bool,"expiry" bigint,"revision_number" bigint,"store_svid" bool,"hint" varchar(255),"jwt_svid_ttl" integer ); - CREATE TABLE IF NOT EXISTS "registered_entries_events" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"entry_id" varchar(255) ); - CREATE TABLE IF NOT EXISTS "join_tokens" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"token" varchar(255),"expiry" bigint ); - CREATE TABLE IF NOT EXISTS "selectors" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"registered_entry_id" integer,"type" varchar(255),"value" varchar(255) ); - CREATE TABLE IF NOT EXISTS "migrations" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"version" integer,"code_version" varchar(255) ); - INSERT INTO migrations VALUES(1,'2023-08-29 13:35:31.510799-03:00','2023-08-29 13:35:31.510799-03:00',22,'1.7.2'); - CREATE TABLE IF NOT EXISTS "dns_names" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"registered_entry_id" integer,"value" varchar(255) ); - CREATE TABLE IF NOT EXISTS "federated_trust_domains" ("id" integer primary key autoincrement,"created_at" datetime,"updated_at" datetime,"trust_domain" varchar(255) NOT NULL,"bundle_endpoint_url" varchar(255),"bundle_endpoint_profile" varchar(255),"endpoint_spiffe_id" varchar(255),"implicit" bool ); - DELETE FROM sqlite_sequence; - INSERT INTO sqlite_sequence VALUES('migrations',1); - INSERT INTO sqlite_sequence VALUES('bundles',1); - CREATE UNIQUE INDEX uix_bundles_trust_domain ON "bundles"(trust_domain) ; - CREATE INDEX 
idx_attested_node_entries_expires_at ON "attested_node_entries"(expires_at) ; - CREATE UNIQUE INDEX uix_attested_node_entries_spiffe_id ON "attested_node_entries"(spiffe_id) ; - CREATE UNIQUE INDEX idx_node_resolver_map ON "node_resolver_map_entries"(spiffe_id, "type", "value") ; - CREATE INDEX idx_registered_entries_spiffe_id ON "registered_entries"(spiffe_id) ; - CREATE INDEX idx_registered_entries_parent_id ON "registered_entries"(parent_id) ; - CREATE INDEX idx_registered_entries_expiry ON "registered_entries"("expiry") ; - CREATE INDEX idx_registered_entries_hint ON "registered_entries"("hint") ; - CREATE UNIQUE INDEX uix_registered_entries_entry_id ON "registered_entries"(entry_id) ; - CREATE UNIQUE INDEX uix_join_tokens_token ON "join_tokens"("token") ; - CREATE INDEX idx_selectors_type_value ON "selectors"("type", "value") ; - CREATE UNIQUE INDEX idx_selector_entry ON "selectors"(registered_entry_id, "type", "value") ; - CREATE UNIQUE INDEX idx_dns_entry ON "dns_names"(registered_entry_id, "value") ; - CREATE UNIQUE INDEX uix_federated_trust_domains_trust_domain ON "federated_trust_domains"(trust_domain) ; - CREATE INDEX idx_federated_registration_entries_registered_entry_id ON "federated_registration_entries"(registered_entry_id) ; - COMMIT; - `, 23: ` PRAGMA foreign_keys=OFF; BEGIN TRANSACTION; diff --git a/pkg/server/datastore/sqlstore/mysql.go b/pkg/server/datastore/sqlstore/mysql.go index ca1d0ae8d65..8e626330f18 100644 --- a/pkg/server/datastore/sqlstore/mysql.go +++ b/pkg/server/datastore/sqlstore/mysql.go @@ -5,9 +5,11 @@ import ( "crypto/x509" "errors" "os" + "strings" "github.com/go-sql-driver/mysql" "github.com/jinzhu/gorm" + "github.com/sirupsen/logrus" "github.com/spiffe/spire/pkg/server/datastore/sqldriver/awsrds" // gorm mysql `cloudsql` dialect, for GCP @@ -18,7 +20,9 @@ import ( _ "github.com/jinzhu/gorm/dialects/mysql" ) -type mysqlDB struct{} +type mysqlDB struct { + logger logrus.FieldLogger +} const ( tlsConfigName = "spireCustomTLS" @@ 
-61,6 +65,10 @@ func (my mysqlDB) connect(cfg *configuration, isReadOnly bool) (db *gorm.DB, ver return nil, "", false, err } + if strings.HasPrefix(version, "5.7.") { + my.logger.Warn("MySQL 5.7 is no longer officially supported, and SPIRE does not guarantee compatibility with MySQL 5.7. Consider upgrading to a newer version of MySQL.") + } + supportsCTE, err = my.supportsCTE(db) if err != nil { return nil, "", false, err @@ -122,7 +130,6 @@ func configureConnection(cfg *configuration, isReadOnly bool) (*mysql.Config, er if len(cfg.RootCAPath) > 0 { rootCertPool := x509.NewCertPool() pem, err := os.ReadFile(cfg.RootCAPath) - if err != nil { return nil, errors.New("invalid mysql config: cannot find Root CA defined in root_ca_path") } diff --git a/pkg/server/datastore/sqlstore/sqlstore.go b/pkg/server/datastore/sqlstore/sqlstore.go index 685707dcfb2..f1645e7bd49 100644 --- a/pkg/server/datastore/sqlstore/sqlstore.go +++ b/pkg/server/datastore/sqlstore/sqlstore.go @@ -3,7 +3,6 @@ package sqlstore import ( "bytes" "context" - "crypto" "crypto/x509" "database/sql" "errors" @@ -25,9 +24,9 @@ import ( "github.com/spiffe/go-spiffe/v2/spiffeid" "github.com/spiffe/spire-api-sdk/proto/spire/api/types" "github.com/spiffe/spire/pkg/common/bundleutil" - "github.com/spiffe/spire/pkg/common/cryptoutil" "github.com/spiffe/spire/pkg/common/protoutil" "github.com/spiffe/spire/pkg/common/telemetry" + "github.com/spiffe/spire/pkg/common/x509util" "github.com/spiffe/spire/pkg/server/datastore" "github.com/spiffe/spire/proto/private/server/journal" "github.com/spiffe/spire/proto/spire/common" @@ -37,17 +36,19 @@ import ( "google.golang.org/protobuf/proto" ) -var sqlError = errs.Class("datastore-sql") -var validEntryIDChars = &unicode.RangeTable{ - R16: []unicode.Range16{ - {0x002d, 0x002e, 1}, // - | . 
- {0x0030, 0x0039, 1}, // [0-9] - {0x0041, 0x005a, 1}, // [A-Z] - {0x005f, 0x005f, 1}, // _ - {0x0061, 0x007a, 1}, // [a-z] - }, - LatinOffset: 5, -} +var ( + sqlError = errs.Class("datastore-sql") + validEntryIDChars = &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x002d, 0x002e, 1}, // - | . + {0x0030, 0x0039, 1}, // [0-9] + {0x0041, 0x005a, 1}, // [A-Z] + {0x005f, 0x005f, 1}, // _ + {0x0061, 0x007a, 1}, // [a-z] + }, + LatinOffset: 5, + } +) const ( PluginName = "sql" @@ -246,16 +247,16 @@ func (ds *Plugin) PruneBundle(ctx context.Context, trustDomainID string, expires } // TaintX509CAByKey taints an X.509 CA signed using the provided public key -func (ds *Plugin) TaintX509CA(ctx context.Context, trustDoaminID string, publicKeyToTaint crypto.PublicKey) error { +func (ds *Plugin) TaintX509CA(ctx context.Context, trustDoaminID string, subjectKeyIDToTaint string) error { return ds.withReadModifyWriteTx(ctx, func(tx *gorm.DB) (err error) { - return taintX509CA(tx, trustDoaminID, publicKeyToTaint) + return taintX509CA(tx, trustDoaminID, subjectKeyIDToTaint) }) } // RevokeX509CA removes a Root CA from the bundle -func (ds *Plugin) RevokeX509CA(ctx context.Context, trustDoaminID string, publicKeyToRevoke crypto.PublicKey) error { +func (ds *Plugin) RevokeX509CA(ctx context.Context, trustDoaminID string, subjectKeyIDToRevoke string) error { return ds.withReadModifyWriteTx(ctx, func(tx *gorm.DB) (err error) { - return revokeX509CA(tx, trustDoaminID, publicKeyToRevoke) + return revokeX509CA(tx, trustDoaminID, subjectKeyIDToRevoke) }) } @@ -509,7 +510,7 @@ func (ds *Plugin) FetchRegistrationEntry(ctx context.Context, // CountRegistrationEntries counts all registrations (pagination available) func (ds *Plugin) CountRegistrationEntries(ctx context.Context, req *datastore.CountRegistrationEntriesRequest) (count int32, err error) { - var actDb = ds.db + actDb := ds.db if req.DataConsistency == datastore.TolerateStale && ds.roDb != nil { actDb = ds.roDb } @@ -1035,7 +1036,9 
@@ func (ds *Plugin) openDB(cfg *configuration, isReadOnly bool) (*gorm.DB, string, case isPostgresDbType(cfg.databaseTypeConfig.databaseType): dialect = postgresDB{} case isMySQLDbType(cfg.databaseTypeConfig.databaseType): - dialect = mysqlDB{} + dialect = mysqlDB{ + logger: ds.log, + } default: return nil, "", false, nil, sqlError.New("unsupported database_type: %v", cfg.databaseTypeConfig.databaseType) } @@ -1149,10 +1152,6 @@ func applyBundleMask(model *Bundle, newBundle *common.Bundle, inputMask *common. bundle.SequenceNumber = newBundle.SequenceNumber } - if inputMask.X509TaintedKeys { - bundle.X509TaintedKeys = newBundle.X509TaintedKeys - } - newModel, err := bundleToModel(bundle) if err != nil { return nil, nil, err @@ -1376,33 +1375,37 @@ func pruneBundle(tx *gorm.DB, trustDomainID string, expiry time.Time, log logrus return changed, nil } -func taintX509CA(tx *gorm.DB, trustDomainID string, publicKeyToTaint crypto.PublicKey) error { +func taintX509CA(tx *gorm.DB, trustDomainID string, subjectKeyIDToTaint string) error { bundle, err := getBundle(tx, trustDomainID) if err != nil { return err } - for _, eachTaintedKey := range bundle.X509TaintedKeys { - taintedKey, err := x509.ParsePKIXPublicKey(eachTaintedKey.PublicKey) + found := false + for _, eachRootCA := range bundle.RootCas { + x509CA, err := x509.ParseCertificate(eachRootCA.DerBytes) if err != nil { - return status.Errorf(codes.Internal, "failed to parse tainted Key: %v", err) + return status.Errorf(codes.Internal, "failed to parse rootCA: %v", err) } - ok, err := cryptoutil.PublicKeyEqual(taintedKey, publicKeyToTaint) - if err != nil { - return status.Errorf(codes.Internal, "failed to compare public key: %v", err) + caSubjectKeyID := x509util.SubjectKeyIDToString(x509CA.SubjectKeyId) + if subjectKeyIDToTaint != caSubjectKeyID { + continue } - if ok { + + if eachRootCA.TaintedKey { return status.Errorf(codes.InvalidArgument, "root CA is already tainted") } + + found = true + eachRootCA.TaintedKey = 
true } - pKey, err := x509.MarshalPKIXPublicKey(publicKeyToTaint) - if err != nil { - return status.Errorf(codes.InvalidArgument, "failed to marshal public key to taint: %v", err) + if !found { + return status.Error(codes.NotFound, "no ca found with provided subject key ID") } - bundle.X509TaintedKeys = append(bundle.X509TaintedKeys, &common.X509TaintedKey{PublicKey: pKey}) + bundle.SequenceNumber++ _, err = updateBundle(tx, bundle, nil) if err != nil { @@ -1412,41 +1415,13 @@ func taintX509CA(tx *gorm.DB, trustDomainID string, publicKeyToTaint crypto.Publ return nil } -func revokeX509CA(tx *gorm.DB, trustDomainID string, publicKeyToRevoke crypto.PublicKey) error { +func revokeX509CA(tx *gorm.DB, trustDomainID string, subjectKeyIDToRevoke string) error { bundle, err := getBundle(tx, trustDomainID) if err != nil { return err } - var taintedKeyFound bool - var taintedKeys []*common.X509TaintedKey - - for _, eachTaintedKey := range bundle.X509TaintedKeys { - taintedKey, err := x509.ParsePKIXPublicKey(eachTaintedKey.PublicKey) - if err != nil { - return status.Errorf(codes.Internal, "failed to parse tainted Key: %v", err) - } - - ok, err := cryptoutil.PublicKeyEqual(taintedKey, publicKeyToRevoke) - if err != nil { - return status.Errorf(codes.Internal, "failed to compare public key: %v", err) - } - if ok { - taintedKeyFound = true - continue - } - - taintedKeys = append(taintedKeys, eachTaintedKey) - } - - if !taintedKeyFound { - return status.Error(codes.InvalidArgument, "it is not possible to revoke an untainted root CA") - } - bundle.X509TaintedKeys = taintedKeys - - // It is possible to keep bundles on journal, when there is no upstream authority, - // this code will be used to remove a CA bundle that is persisted on datastore, - // only in case it is found + keyFound := false var rootCAs []*common.Certificate for _, ca := range bundle.RootCas { cert, err := x509.ParseCertificate(ca.DerBytes) @@ -1454,17 +1429,24 @@ func revokeX509CA(tx *gorm.DB, trustDomainID 
string, publicKeyToRevoke crypto.Pu return status.Errorf(codes.Internal, "failed to parse root CA: %v", err) } - ok, err := cryptoutil.PublicKeyEqual(cert.PublicKey, publicKeyToRevoke) - if err != nil { - return status.Errorf(codes.Internal, "failed to compare public key: %v", err) - } - - if ok { + caSubjectKeyID := x509util.SubjectKeyIDToString(cert.SubjectKeyId) + if subjectKeyIDToRevoke == caSubjectKeyID { + if !ca.TaintedKey { + return status.Error(codes.InvalidArgument, "it is not possible to revoke an untainted root CA") + } + keyFound = true continue } + rootCAs = append(rootCAs, ca) } + + if !keyFound { + return status.Error(codes.NotFound, "no root CA found with provided subject key ID") + } + bundle.RootCas = rootCAs + bundle.SequenceNumber++ if _, err := updateBundle(tx, bundle, nil); err != nil { return status.Errorf(codes.Internal, "failed to update bundle: %v", err) @@ -1503,6 +1485,7 @@ func taintJWTKey(tx *gorm.DB, trustDomainID string, authorityID string) (*common return nil, status.Error(codes.NotFound, "no JWT Key found with provided key ID") } + bundle.SequenceNumber++ if _, err := updateBundle(tx, bundle, nil); err != nil { return nil, err } @@ -1542,6 +1525,7 @@ func revokeJWTKey(tx *gorm.DB, trustDomainID string, authorityID string) (*commo return nil, status.Error(codes.NotFound, "no JWT Key found with provided key ID") } + bundle.SequenceNumber++ if _, err := updateBundle(tx, bundle, nil); err != nil { return nil, err } @@ -2946,7 +2930,7 @@ func buildListRegistrationEntriesQuery(dbType string, supportsCTE bool, req *dat func buildListRegistrationEntriesQuerySQLite3(req *datastore.ListRegistrationEntriesRequest) (string, []any, error) { builder := new(strings.Builder) filtered, args, err := appendListRegistrationEntriesFilterQuery("\nWITH listing AS (\n", builder, SQLite, req) - var downstream = false + downstream := false if req.ByDownstream != nil { downstream = *req.ByDownstream } @@ -3041,7 +3025,7 @@ func 
buildListRegistrationEntriesQueryPostgreSQL(req *datastore.ListRegistration builder := new(strings.Builder) filtered, args, err := appendListRegistrationEntriesFilterQuery("\nWITH listing AS (\n", builder, PostgreSQL, req) - var downstream = false + downstream := false if req.ByDownstream != nil { downstream = *req.ByDownstream } @@ -3180,7 +3164,7 @@ LEFT JOIN `) filtered, args, err := appendListRegistrationEntriesFilterQuery("WHERE E.id IN (\n", builder, MySQL, req) - var downstream = false + downstream := false if req.ByDownstream != nil { downstream = *req.ByDownstream } @@ -3208,7 +3192,7 @@ func buildListRegistrationEntriesQueryMySQLCTE(req *datastore.ListRegistrationEn builder := new(strings.Builder) filtered, args, err := appendListRegistrationEntriesFilterQuery("\nWITH listing AS (\n", builder, MySQL, req) - var downstream = false + downstream := false if req.ByDownstream != nil { downstream = *req.ByDownstream } @@ -3321,7 +3305,6 @@ func countRegistrationEntries(ctx context.Context, db *sqlDB, _ logrus.FieldLogg for { resp, err := listRegistrationEntriesOnce(ctx, db.raw, db.databaseType, db.supportsCTE, listReq) - if err != nil { return -1, err } @@ -4418,7 +4401,7 @@ func validateRegistrationEntry(entry *common.RegistrationEntry) error { return sqlError.New("invalid request: missing registered entry") } - if entry.Selectors == nil || len(entry.Selectors) == 0 { + if len(entry.Selectors) == 0 { return sqlError.New("invalid registration entry: missing selector list") } @@ -4479,8 +4462,7 @@ func validateRegistrationEntryForUpdate(entry *common.RegistrationEntry, mask *c return sqlError.New("invalid request: missing registered entry") } - if (mask == nil || mask.Selectors) && - (entry.Selectors == nil || len(entry.Selectors) == 0) { + if (mask == nil || mask.Selectors) && len(entry.Selectors) == 0 { return sqlError.New("invalid registration entry: missing selector list") } diff --git a/pkg/server/datastore/sqlstore/sqlstore_test.go 
b/pkg/server/datastore/sqlstore/sqlstore_test.go index 34bce546731..979469298c4 100644 --- a/pkg/server/datastore/sqlstore/sqlstore_test.go +++ b/pkg/server/datastore/sqlstore/sqlstore_test.go @@ -27,6 +27,7 @@ import ( "github.com/spiffe/spire/pkg/common/protoutil" "github.com/spiffe/spire/pkg/common/telemetry" "github.com/spiffe/spire/pkg/common/util" + "github.com/spiffe/spire/pkg/common/x509util" "github.com/spiffe/spire/pkg/server/datastore" "github.com/spiffe/spire/proto/private/server/journal" "github.com/spiffe/spire/proto/spire/common" @@ -641,148 +642,147 @@ func (s *PluginSuite) TestBundlePrune() { func (s *PluginSuite) TestTaintX509CA() { t := s.T() - // Setup - unusedKey := testkey.NewEC256(t) - // Tainted public key on raw format - certPublicKeyRaw, err := x509.MarshalPKIXPublicKey(s.cert.PublicKey) - require.NoError(t, err) - - // Create new bundle with two certs - bundle := bundleutil.BundleProtoFromRootCAs("spiffe://foo", []*x509.Certificate{s.cert, s.cacert}) - bundle.X509TaintedKeys = []*common.X509TaintedKey{ - {PublicKey: []byte("foh")}, - } + skID := x509util.SubjectKeyIDToString(s.cert.SubjectKeyId) - // Bundle not found - err = s.ds.TaintX509CA(ctx, "spiffe://foo", unusedKey.Public()) - spiretest.RequireGRPCStatus(t, err, codes.NotFound, _notFoundErrMsg) + t.Run("bundle not found", func(t *testing.T) { + err := s.ds.TaintX509CA(ctx, "spiffe://foo", "foo") + spiretest.RequireGRPCStatus(t, err, codes.NotFound, _notFoundErrMsg) + }) - _, err = s.ds.CreateBundle(ctx, bundle) + // Create Malformed CA + bundle := bundleutil.BundleProtoFromRootCAs("spiffe://foo", []*x509.Certificate{{Raw: []byte("bar")}}) + _, err := s.ds.CreateBundle(ctx, bundle) require.NoError(t, err) - // Bundle contains a malformed tainted key - err = s.ds.TaintX509CA(ctx, "spiffe://foo", unusedKey.Public()) - spiretest.RequireGRPCStatusHasPrefix(t, err, codes.Internal, "failed to parse tainted Key:") + t.Run("bundle not found", func(t *testing.T) { + err := 
s.ds.TaintX509CA(ctx, "spiffe://foo", "foo") + spiretest.RequireGRPCStatus(t, err, codes.Internal, "failed to parse rootCA: x509: malformed certificate") + }) - // Remove malformed tainted key - bundle.X509TaintedKeys = []*common.X509TaintedKey{} - _, err = s.ds.UpdateBundle(ctx, bundle, nil) - require.NoError(t, err) + validateBundle := func(expectSequenceNumber uint64) { + expectedRootCAs := []*common.Certificate{ + {DerBytes: s.cert.Raw, TaintedKey: true}, + {DerBytes: s.cacert.Raw}, + } - // Invalid public key to taint provided - err = s.ds.TaintX509CA(ctx, "spiffe://foo", unusedKey) - spiretest.RequireGRPCStatus(t, err, codes.InvalidArgument, "failed to marshal public key to taint: x509: unsupported public key type: *ecdsa.PrivateKey") + fetchedBundle, err := s.ds.FetchBundle(ctx, "spiffe://foo") + require.NoError(t, err) + require.Equal(t, expectedRootCAs, fetchedBundle.RootCas) + require.Equal(t, expectSequenceNumber, fetchedBundle.SequenceNumber) + } - // Taint successfully - err = s.ds.TaintX509CA(ctx, "spiffe://foo", s.cert.PublicKey) + // Update bundle + bundle = bundleutil.BundleProtoFromRootCAs("spiffe://foo", []*x509.Certificate{s.cert, s.cacert}) + _, err = s.ds.UpdateBundle(ctx, bundle, nil) require.NoError(t, err) - fetchedBundle, err := s.ds.FetchBundle(ctx, "spiffe://foo") - require.NoError(t, err) + t.Run("taint successfully", func(t *testing.T) { + err := s.ds.TaintX509CA(ctx, "spiffe://foo", skID) + require.NoError(t, err) - expectedRootCAs := []*common.Certificate{ - {DerBytes: s.cert.Raw}, - {DerBytes: s.cacert.Raw}, - } + validateBundle(1) + }) - require.Equal(t, expectedRootCAs, fetchedBundle.RootCas) + t.Run("no bundle with provided skID", func(t *testing.T) { + // Not able to taint a tainted CA + err := s.ds.TaintX509CA(ctx, "spiffe://foo", "foo") + spiretest.RequireGRPCStatus(t, err, codes.NotFound, "no ca found with provided subject key ID") - expectedTaintedKeys := []*common.X509TaintedKey{ - {PublicKey: certPublicKeyRaw}, - } - 
require.Equal(t, expectedTaintedKeys, fetchedBundle.X509TaintedKeys) + // Validate than sequence number is not incremented + validateBundle(1) + }) - // Not able to taint a tainted CA - err = s.ds.TaintX509CA(ctx, "spiffe://foo", s.cert.PublicKey) - spiretest.RequireGRPCStatus(t, err, codes.InvalidArgument, "root CA is already tainted") + t.Run("failed to taint already tainted ca", func(t *testing.T) { + // Not able to taint a tainted CA + err := s.ds.TaintX509CA(ctx, "spiffe://foo", skID) + spiretest.RequireGRPCStatus(t, err, codes.InvalidArgument, "root CA is already tainted") + + // Validate than sequence number is not incremented + validateBundle(1) + }) } func (s *PluginSuite) TestRevokeX509CA() { t := s.T() - // Setup - unusedKey := testkey.NewRSA2048(t) + // SubjectKeyID + certID := x509util.SubjectKeyIDToString(s.cert.SubjectKeyId) - caCertPublicKeyRaw, err := x509.MarshalPKIXPublicKey(s.cacert.PublicKey) - require.NoError(t, err) - - // Tainted public key on raw format - certPublicKeyRaw, err := x509.MarshalPKIXPublicKey(s.cert.PublicKey) - require.NoError(t, err) + // Bundle not found + t.Run("bundle not found", func(t *testing.T) { + err := s.ds.RevokeX509CA(ctx, "spiffe://foo", "foo") + spiretest.RequireGRPCStatus(t, err, codes.NotFound, _notFoundErrMsg) + }) + // Create new bundle with two cert (one valid and one expired) keyForMalformedCert := testkey.NewEC256(t) malformedX509 := &x509.Certificate{ PublicKey: keyForMalformedCert.PublicKey, Raw: []byte("no a certificate"), } - - // Create new bundle with two cert (one valid and one expired) bundle := bundleutil.BundleProtoFromRootCAs("spiffe://foo", []*x509.Certificate{s.cert, s.cacert, malformedX509}) - bundle.X509TaintedKeys = []*common.X509TaintedKey{ - {PublicKey: []byte("foh")}, - } - - // Bundle not found - err = s.ds.RevokeX509CA(ctx, "spiffe://foo", unusedKey.Public()) - spiretest.RequireGRPCStatus(t, err, codes.NotFound, _notFoundErrMsg) - - _, err = s.ds.CreateBundle(ctx, bundle) + _, err := 
s.ds.CreateBundle(ctx, bundle) require.NoError(t, err) - // Bundle contains a malformed tainted key - err = s.ds.RevokeX509CA(ctx, "spiffe://foo", unusedKey.PublicKey) - spiretest.RequireGRPCStatusHasPrefix(t, err, codes.Internal, "failed to parse tainted Key:") + t.Run("Bundle contains a malformed certificate", func(t *testing.T) { + err := s.ds.RevokeX509CA(ctx, "spiffe://foo", "foo") + spiretest.RequireGRPCStatusHasPrefix(t, err, codes.Internal, "failed to parse root CA: x509: malformed certificate") + }) - // Remove malformed tainted key - bundle.X509TaintedKeys = []*common.X509TaintedKey{} + // Remove malformed certificate + bundle = bundleutil.BundleProtoFromRootCAs("spiffe://foo", []*x509.Certificate{s.cert, s.cacert}) _, err = s.ds.UpdateBundle(ctx, bundle, nil) require.NoError(t, err) - // // No root CA is using provided key - // err = s.ds.RevokeX509CA(ctx, "spiffe://foo", unusedKey.PublicKey) - // spiretest.RequireGRPCStatus(t, err, codes.NotFound, "no root CA found with provided public key") - - // No able to revoke untainted bundles - err = s.ds.RevokeX509CA(ctx, "spiffe://foo", s.cert.PublicKey) - spiretest.RequireGRPCStatus(t, err, codes.InvalidArgument, "it is not possible to revoke an untainted root CA") + originalBundles := []*common.Certificate{ + {DerBytes: s.cert.Raw}, + {DerBytes: s.cacert.Raw}, + } - // Mark cert as tainted - bundle.X509TaintedKeys = []*common.X509TaintedKey{ - {PublicKey: certPublicKeyRaw}, - {PublicKey: caCertPublicKeyRaw}, + validateBundle := func(expectedRootCAs []*common.Certificate, expectSequenceNumber uint64) { + fetchedBundle, err := s.ds.FetchBundle(ctx, "spiffe://foo") + require.NoError(t, err) + require.Equal(t, expectedRootCAs, fetchedBundle.RootCas) + require.Equal(t, expectSequenceNumber, fetchedBundle.SequenceNumber) } - _, err = s.ds.UpdateBundle(ctx, bundle, nil) - require.NoError(t, err) - // Bundle contains a malformed root CA - err = s.ds.RevokeX509CA(ctx, "spiffe://foo", s.cert.PublicKey) - 
spiretest.RequireGRPCStatus(t, err, codes.Internal, "failed to parse root CA: x509: malformed certificate") + t.Run("No root CA is using provided skID", func(t *testing.T) { + err := s.ds.RevokeX509CA(ctx, "spiffe://foo", "foo") + spiretest.RequireGRPCStatus(t, err, codes.NotFound, "no root CA found with provided subject key ID") - // Remove malformed root CA - bundle.RootCas = []*common.Certificate{ - {DerBytes: s.cert.Raw}, - {DerBytes: s.cacert.Raw}, - } - _, err = s.ds.UpdateBundle(ctx, bundle, nil) - require.NoError(t, err) + validateBundle(originalBundles, 0) + }) - // Revoke successfully - err = s.ds.RevokeX509CA(ctx, "spiffe://foo", s.cert.PublicKey) - require.NoError(t, err) + t.Run("Unable to revoke untainted bundles", func(t *testing.T) { + err := s.ds.RevokeX509CA(ctx, "spiffe://foo", certID) + spiretest.RequireGRPCStatus(t, err, codes.InvalidArgument, "it is not possible to revoke an untainted root CA") - fetchedBundle, err := s.ds.FetchBundle(ctx, "spiffe://foo") + validateBundle(originalBundles, 0) + }) + + // Mark cert as tainted + err = s.ds.TaintX509CA(ctx, "spiffe://foo", certID) require.NoError(t, err) - expectedRootCAs := []*common.Certificate{ - {DerBytes: s.cacert.Raw}, - } - require.Equal(t, expectedRootCAs, fetchedBundle.RootCas) + t.Run("Revoke successfully", func(t *testing.T) { + taintedBundles := []*common.Certificate{ + {DerBytes: s.cert.Raw, TaintedKey: true}, + {DerBytes: s.cacert.Raw}, + } + // Validating precondition, with 2 bundles and sequence + validateBundle(taintedBundles, 1) - expectedTaintedKeys := []*common.X509TaintedKey{ - {PublicKey: caCertPublicKeyRaw}, - } - require.Equal(t, expectedTaintedKeys, fetchedBundle.X509TaintedKeys) + // Revoke + err = s.ds.RevokeX509CA(ctx, "spiffe://foo", certID) + require.NoError(t, err) + + // CA is removed and sequence incremented + expectedRootCAs := []*common.Certificate{ + {DerBytes: s.cacert.Raw}, + } + validateBundle(expectedRootCAs, 2) + }) } func (s *PluginSuite) TestTaintJWTKey() 
{ @@ -790,11 +790,12 @@ func (s *PluginSuite) TestTaintJWTKey() { // Setup // Create new bundle with two JWT Keys bundle := bundleutil.BundleProtoFromRootCAs("spiffe://foo", nil) - bundle.JwtSigningKeys = []*common.PublicKey{ + originalKeys := []*common.PublicKey{ {Kid: "key1"}, {Kid: "key2"}, {Kid: "key2"}, } + bundle.JwtSigningKeys = originalKeys // Bundle not found publicKey, err := s.ds.TaintJWTKey(ctx, "spiffe://foo", "key1") @@ -814,25 +815,37 @@ func (s *PluginSuite) TestTaintJWTKey() { spiretest.RequireGRPCStatus(t, err, codes.NotFound, "no JWT Key found with provided key ID") require.Nil(t, publicKey) + validateBundle := func(expectedKeys []*common.PublicKey, expectSequenceNumber uint64) { + fetchedBundle, err := s.ds.FetchBundle(ctx, "spiffe://foo") + require.NoError(t, err) + + spiretest.RequireProtoListEqual(t, expectedKeys, fetchedBundle.JwtSigningKeys) + require.Equal(t, expectSequenceNumber, fetchedBundle.SequenceNumber) + } + + // Validate no changes + validateBundle(originalKeys, 0) + // Taint successfully publicKey, err = s.ds.TaintJWTKey(ctx, "spiffe://foo", "key1") require.NoError(t, err) require.NotNil(t, publicKey) - fetchedBundle, err := s.ds.FetchBundle(ctx, "spiffe://foo") - require.NoError(t, err) - - expectedKeys := []*common.PublicKey{ + taintedKey := []*common.PublicKey{ {Kid: "key1", TaintedKey: true}, {Kid: "key2"}, {Kid: "key2"}, } - require.Equal(t, expectedKeys, fetchedBundle.JwtSigningKeys) + // Validate expected response + validateBundle(taintedKey, 1) // No able to taint Key again publicKey, err = s.ds.TaintJWTKey(ctx, "spiffe://foo", "key1") spiretest.RequireGRPCStatus(t, err, codes.InvalidArgument, "key is already tainted") require.Nil(t, publicKey) + + // No changes + validateBundle(taintedKey, 1) } func (s *PluginSuite) TestRevokeJWTKey() { @@ -878,23 +891,31 @@ func (s *PluginSuite) TestRevokeJWTKey() { require.Nil(t, publicKey) // Remove duplicated key - bundle.JwtSigningKeys = []*common.PublicKey{ + originalKeys := 
[]*common.PublicKey{ {Kid: "key1"}, {Kid: "key2", TaintedKey: true}, } + bundle.JwtSigningKeys = originalKeys _, err = s.ds.UpdateBundle(ctx, bundle, nil) require.NoError(t, err) + validateBundle := func(expectedKeys []*common.PublicKey, expectSequenceNumber uint64) { + fetchedBundle, err := s.ds.FetchBundle(ctx, "spiffe://foo") + require.NoError(t, err) + + spiretest.RequireProtoListEqual(t, expectedKeys, fetchedBundle.JwtSigningKeys) + require.Equal(t, expectSequenceNumber, fetchedBundle.SequenceNumber) + } + + validateBundle(originalKeys, 0) + // Revoke successfully publicKey, err = s.ds.RevokeJWTKey(ctx, "spiffe://foo", "key2") require.NoError(t, err) require.Equal(t, &common.PublicKey{Kid: "key2", TaintedKey: true}, publicKey) - fetchedBundle, err := s.ds.FetchBundle(ctx, "spiffe://foo") - require.NoError(t, err) - expectedJWTKeys := []*common.PublicKey{{Kid: "key1"}} - require.Equal(t, expectedJWTKeys, fetchedBundle.JwtSigningKeys) + validateBundle(expectedJWTKeys, 1) } func (s *PluginSuite) TestCreateAttestedNode() { @@ -4916,14 +4937,8 @@ func (s *PluginSuite) TestMigration() { switch schemaVersion { // All of these schema versions were migrated by previous versions // of SPIRE server and no longer have migration code. 
- case 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20: + case 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22: prepareDB(false) - case 21: - prepareDB(true) - case 22: - prepareDB(true) - case 23: - prepareDB(true) default: t.Fatalf("no migration test added for schema version %d", schemaVersion) } diff --git a/pkg/server/endpoints/authorized_entryfetcher.go b/pkg/server/endpoints/authorized_entryfetcher.go index b5c69dac700..27de3f14f39 100644 --- a/pkg/server/endpoints/authorized_entryfetcher.go +++ b/pkg/server/endpoints/authorized_entryfetcher.go @@ -9,6 +9,7 @@ import ( "github.com/sirupsen/logrus" "github.com/spiffe/go-spiffe/v2/spiffeid" "github.com/spiffe/spire-api-sdk/proto/spire/api/types" + "github.com/spiffe/spire/pkg/common/telemetry" "github.com/spiffe/spire/pkg/server/api" "github.com/spiffe/spire/pkg/server/authorizedentries" "github.com/spiffe/spire/pkg/server/datastore" @@ -36,9 +37,9 @@ type eventsBasedCache interface { pruneMissedEvents() } -func NewAuthorizedEntryFetcherWithEventsBasedCache(ctx context.Context, log logrus.FieldLogger, clk clock.Clock, ds datastore.DataStore, cacheReloadInterval, pruneEventsOlderThan, sqlTransactionTimeout time.Duration) (*AuthorizedEntryFetcherWithEventsBasedCache, error) { +func NewAuthorizedEntryFetcherWithEventsBasedCache(ctx context.Context, log logrus.FieldLogger, metrics telemetry.Metrics, clk clock.Clock, ds datastore.DataStore, cacheReloadInterval, pruneEventsOlderThan, sqlTransactionTimeout time.Duration) (*AuthorizedEntryFetcherWithEventsBasedCache, error) { log.Info("Building event-based in-memory entry cache") - cache, registrationEntries, attestedNodes, err := buildCache(ctx, log, ds, clk, sqlTransactionTimeout) + cache, registrationEntries, attestedNodes, err := buildCache(ctx, log, metrics, ds, clk, sqlTransactionTimeout) if err != nil { return nil, err } @@ -111,15 +112,15 @@ func (a *AuthorizedEntryFetcherWithEventsBasedCache) 
updateCache(ctx context.Con return errors.Join(updateRegistrationEntriesCacheErr, updateAttestedNodesCacheErr) } -func buildCache(ctx context.Context, log logrus.FieldLogger, ds datastore.DataStore, clk clock.Clock, sqlTransactionTimeout time.Duration) (*authorizedentries.Cache, *registrationEntries, *attestedNodes, error) { +func buildCache(ctx context.Context, log logrus.FieldLogger, metrics telemetry.Metrics, ds datastore.DataStore, clk clock.Clock, sqlTransactionTimeout time.Duration) (*authorizedentries.Cache, *registrationEntries, *attestedNodes, error) { cache := authorizedentries.NewCache(clk) - registrationEntries, err := buildRegistrationEntriesCache(ctx, log, ds, clk, cache, buildCachePageSize, sqlTransactionTimeout) + registrationEntries, err := buildRegistrationEntriesCache(ctx, log, metrics, ds, clk, cache, buildCachePageSize, sqlTransactionTimeout) if err != nil { return nil, nil, nil, err } - attestedNodes, err := buildAttestedNodesCache(ctx, log, ds, clk, cache, sqlTransactionTimeout) + attestedNodes, err := buildAttestedNodesCache(ctx, log, metrics, ds, clk, cache, sqlTransactionTimeout) if err != nil { return nil, nil, nil, err } diff --git a/pkg/server/endpoints/authorized_entryfetcher_attested_nodes.go b/pkg/server/endpoints/authorized_entryfetcher_attested_nodes.go index 3bd125c736c..6d692eeba74 100644 --- a/pkg/server/endpoints/authorized_entryfetcher_attested_nodes.go +++ b/pkg/server/endpoints/authorized_entryfetcher_attested_nodes.go @@ -8,7 +8,9 @@ import ( "github.com/andres-erbsen/clock" "github.com/sirupsen/logrus" + "github.com/spiffe/spire/pkg/common/telemetry" + server_telemetry "github.com/spiffe/spire/pkg/common/telemetry/server" "github.com/spiffe/spire/pkg/server/api" "github.com/spiffe/spire/pkg/server/authorizedentries" "github.com/spiffe/spire/pkg/server/datastore" @@ -17,11 +19,12 @@ import ( ) type attestedNodes struct { - cache *authorizedentries.Cache - clk clock.Clock - ds datastore.DataStore - log logrus.FieldLogger - 
mu sync.RWMutex + cache *authorizedentries.Cache + clk clock.Clock + ds datastore.DataStore + log logrus.FieldLogger + metrics telemetry.Metrics + mu sync.RWMutex firstEventID uint firstEventTime time.Time @@ -31,8 +34,9 @@ type attestedNodes struct { sqlTransactionTimeout time.Duration } -// buildAttestedNodesCache Fetches all attested nodes and adds the unexpired ones to the cache -func buildAttestedNodesCache(ctx context.Context, log logrus.FieldLogger, ds datastore.DataStore, clk clock.Clock, cache *authorizedentries.Cache, sqlTransactionTimeout time.Duration) (*attestedNodes, error) { +// buildAttestedNodesCache fetches all attested nodes and adds the unexpired ones to the cache. +// It runs once at startup. +func buildAttestedNodesCache(ctx context.Context, log logrus.FieldLogger, metrics telemetry.Metrics, ds datastore.DataStore, clk clock.Clock, cache *authorizedentries.Cache, sqlTransactionTimeout time.Duration) (*attestedNodes, error) { resp, err := ds.ListAttestedNodesEvents(ctx, &datastore.ListAttestedNodesEventsRequest{}) if err != nil { return nil, err @@ -49,6 +53,8 @@ func buildAttestedNodesCache(ctx context.Context, log logrus.FieldLogger, ds dat firstEventID = event.EventID firstEventTime = now } else { + // After getting the first event, search for any gaps in the event stream, from the first event to the last event. + // During each cache refresh cycle, we will check if any of these missed events get populated. 
for i := lastEventID + 1; i < event.EventID; i++ { missedEvents[i] = now } @@ -79,6 +85,7 @@ func buildAttestedNodesCache(ctx context.Context, log logrus.FieldLogger, ds dat firstEventID: firstEventID, firstEventTime: firstEventTime, log: log, + metrics: metrics, lastEventID: lastEventID, missedEvents: missedEvents, seenMissedStartupEvents: make(map[uint]struct{}), @@ -89,11 +96,10 @@ func buildAttestedNodesCache(ctx context.Context, log logrus.FieldLogger, ds dat // updateCache Fetches all the events since the last time this function was running and updates // the cache with all the changes. func (a *attestedNodes) updateCache(ctx context.Context) error { + // Process events skipped over previously if err := a.missedStartupEvents(ctx); err != nil { a.log.WithError(err).Error("Unable to process missed startup events") } - - // Process events skipped over previously a.replayMissedEvents(ctx) req := &datastore.ListAttestedNodesEventsRequest{ @@ -111,8 +117,8 @@ func (a *attestedNodes) updateCache(ctx context.Context) error { // were skipped over and need to be queued in case they show up later. // This can happen when a long running transaction allocates an event ID but a shorter transaction // comes in after, allocates and commits the ID first. If a read comes in at this moment, the event id for - // the longer running transaction will be skipped over - if !a.firstEventTime.IsZero() && event.EventID != a.lastEventID+1 { + // the longer running transaction will be skipped over. + if !a.firstEventTime.IsZero() { for i := a.lastEventID + 1; i < event.EventID; i++ { a.log.WithField(telemetry.EventID, i).Info("Detected skipped attested node event") a.mu.Lock() @@ -140,9 +146,17 @@ func (a *attestedNodes) updateCache(ctx context.Context) error { a.lastEventID = event.EventID } + // These two should be the same value but it's valuable to have them both be emitted for incident triage. 
+ server_telemetry.SetAgentsByExpiresAtCacheCountGauge(a.metrics, a.cache.Stats().AgentsByExpiresAt) + server_telemetry.SetAgentsByIDCacheCountGauge(a.metrics, a.cache.Stats().AgentsByID) + return nil } +// missedStartupEvents will check for any events that arrive with an ID less than the first event ID we receive. +// For example if the first event ID we receive is 3, this function will check for any IDs less than that. +// If event ID 2 comes in later on, due to a long running transaction, this function will update the cache +// with the information from this event. This function will run until time equal to sqlTransactionTimeout has elapsed after startup. func (a *attestedNodes) missedStartupEvents(ctx context.Context) error { if a.firstEventTime.IsZero() || a.clk.Now().Sub(a.firstEventTime) > a.sqlTransactionTimeout { return nil @@ -182,7 +196,6 @@ func (a *attestedNodes) replayMissedEvents(ctx context.Context) { switch status.Code(err) { case codes.OK: case codes.NotFound: - log.Debug("Event not yet populated in database") continue default: log.WithError(err).Error("Failed to fetch info about missed Attested Node event") @@ -196,6 +209,7 @@ func (a *attestedNodes) replayMissedEvents(ctx context.Context) { delete(a.missedEvents, eventID) } + server_telemetry.SetSkippedNodeEventIDsCacheCountGauge(a.metrics, len(a.missedEvents)) } // updatedCacheEntry update/deletes/creates an individual attested node in the cache. 
diff --git a/pkg/server/endpoints/authorized_entryfetcher_attested_nodes_test.go b/pkg/server/endpoints/authorized_entryfetcher_attested_nodes_test.go index 4f9b60d8e9d..d02ebe60ddf 100644 --- a/pkg/server/endpoints/authorized_entryfetcher_attested_nodes_test.go +++ b/pkg/server/endpoints/authorized_entryfetcher_attested_nodes_test.go @@ -9,62 +9,83 @@ import ( "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/spiffe/spire/pkg/common/telemetry" "github.com/spiffe/spire/pkg/server/authorizedentries" "github.com/spiffe/spire/pkg/server/datastore" "github.com/spiffe/spire/proto/spire/common" "github.com/spiffe/spire/test/clock" "github.com/spiffe/spire/test/fakes/fakedatastore" + "github.com/spiffe/spire/test/fakes/fakemetrics" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestUpdateAttestedNodesCache(t *testing.T) { - ctx := context.Background() - log, _ := test.NewNullLogger() - clk := clock.NewMock(t) - ds := fakedatastore.New(t) - cache := authorizedentries.NewCache(clk) - - attestedNodes, err := buildAttestedNodesCache(ctx, log, ds, clk, cache, defaultSQLTransactionTimeout) - require.NoError(t, err) - require.NotNil(t, attestedNodes) - - agentID, err := spiffeid.FromString("spiffe://example.org/myagent") - require.NoError(t, err) - - _, err = ds.CreateAttestedNode(ctx, &common.AttestedNode{ - SpiffeId: agentID.String(), - CertNotAfter: time.Now().Add(5 * time.Hour).Unix(), - }) - require.NoError(t, err) - for _, tt := range []struct { name string errs []error expectedLastAttestedNodeEventID uint + expectMetrics []fakemetrics.MetricItem }{ { name: "Error Listing Attested Node Events", errs: []error{errors.New("listing attested node events")}, expectedLastAttestedNodeEventID: uint(0), + expectMetrics: nil, }, { name: "Error Fetching Attested Node", errs: []error{nil, errors.New("fetching attested node")}, expectedLastAttestedNodeEventID: uint(0), + 
expectMetrics: nil, }, { name: "Error Getting Node Selectors", errs: []error{nil, nil, errors.New("getting node selectors")}, expectedLastAttestedNodeEventID: uint(0), + expectMetrics: nil, }, { name: "No Errors", expectedLastAttestedNodeEventID: uint(1), + expectMetrics: []fakemetrics.MetricItem{ + { + Type: fakemetrics.SetGaugeType, + Key: []string{telemetry.Node, telemetry.AgentsByExpiresAtCache, telemetry.Count}, + Val: 1, + Labels: nil, + }, + { + Type: fakemetrics.SetGaugeType, + Key: []string{telemetry.Node, telemetry.AgentsByIDCache, telemetry.Count}, + Val: 1, + Labels: nil, + }, + }, }, } { tt := tt t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + log, _ := test.NewNullLogger() + clk := clock.NewMock(t) + ds := fakedatastore.New(t) + cache := authorizedentries.NewCache(clk) + metrics := fakemetrics.New() + + attestedNodes, err := buildAttestedNodesCache(ctx, log, metrics, ds, clk, cache, defaultSQLTransactionTimeout) + require.NoError(t, err) + require.NotNil(t, attestedNodes) + + agentID, err := spiffeid.FromString("spiffe://example.org/myagent") + require.NoError(t, err) + + _, err = ds.CreateAttestedNode(ctx, &common.AttestedNode{ + SpiffeId: agentID.String(), + CertNotAfter: time.Now().Add(5 * time.Hour).Unix(), + }) + require.NoError(t, err) + for _, err = range tt.errs { ds.AppendNextError(err) } @@ -77,6 +98,10 @@ func TestUpdateAttestedNodesCache(t *testing.T) { } assert.Equal(t, tt.expectedLastAttestedNodeEventID, attestedNodes.lastEventID) + + if tt.expectMetrics != nil { + assert.Subset(t, metrics.AllMetrics(), tt.expectMetrics) + } }) } } @@ -88,14 +113,15 @@ func TestAttestedNodesCacheMissedEventNotFound(t *testing.T) { clk := clock.NewMock(t) ds := fakedatastore.New(t) cache := authorizedentries.NewCache(clk) + metrics := fakemetrics.New() - attestedNodes, err := buildAttestedNodesCache(ctx, log, ds, clk, cache, defaultSQLTransactionTimeout) + attestedNodes, err := buildAttestedNodesCache(ctx, log, metrics, ds, clk, cache, 
defaultSQLTransactionTimeout) require.NoError(t, err) require.NotNil(t, attestedNodes) attestedNodes.missedEvents[1] = clk.Now() attestedNodes.replayMissedEvents(ctx) - require.Equal(t, "Event not yet populated in database", hook.LastEntry().Message) + require.Zero(t, hook.Entries) } func TestAttestedNodesSavesMissedStartupEvents(t *testing.T) { @@ -105,6 +131,7 @@ func TestAttestedNodesSavesMissedStartupEvents(t *testing.T) { clk := clock.NewMock(t) ds := fakedatastore.New(t) cache := authorizedentries.NewCache(clk) + metrics := fakemetrics.New() err := ds.CreateAttestedNodeEventForTesting(ctx, &datastore.AttestedNodeEvent{ EventID: 3, @@ -112,7 +139,7 @@ func TestAttestedNodesSavesMissedStartupEvents(t *testing.T) { }) require.NoError(t, err) - attestedNodes, err := buildAttestedNodesCache(ctx, log, ds, clk, cache, defaultSQLTransactionTimeout) + attestedNodes, err := buildAttestedNodesCache(ctx, log, metrics, ds, clk, cache, defaultSQLTransactionTimeout) require.NoError(t, err) require.NotNil(t, attestedNodes) require.Equal(t, uint(3), attestedNodes.firstEventID) diff --git a/pkg/server/endpoints/authorized_entryfetcher_registration_entries.go b/pkg/server/endpoints/authorized_entryfetcher_registration_entries.go index 0a7f3bf32ce..570cbad0084 100644 --- a/pkg/server/endpoints/authorized_entryfetcher_registration_entries.go +++ b/pkg/server/endpoints/authorized_entryfetcher_registration_entries.go @@ -9,6 +9,7 @@ import ( "github.com/andres-erbsen/clock" "github.com/sirupsen/logrus" "github.com/spiffe/spire/pkg/common/telemetry" + server_telemetry "github.com/spiffe/spire/pkg/common/telemetry/server" "github.com/spiffe/spire/pkg/server/api" "github.com/spiffe/spire/pkg/server/authorizedentries" "github.com/spiffe/spire/pkg/server/datastore" @@ -17,11 +18,12 @@ import ( ) type registrationEntries struct { - cache *authorizedentries.Cache - clk clock.Clock - ds datastore.DataStore - log logrus.FieldLogger - mu sync.RWMutex + cache *authorizedentries.Cache + clk 
clock.Clock + ds datastore.DataStore + log logrus.FieldLogger + metrics telemetry.Metrics + mu sync.RWMutex firstEventID uint firstEventTime time.Time @@ -32,7 +34,7 @@ type registrationEntries struct { } // buildRegistrationEntriesCache Fetches all registration entries and adds them to the cache -func buildRegistrationEntriesCache(ctx context.Context, log logrus.FieldLogger, ds datastore.DataStore, clk clock.Clock, cache *authorizedentries.Cache, pageSize int32, sqlTransactionTimeout time.Duration) (*registrationEntries, error) { +func buildRegistrationEntriesCache(ctx context.Context, log logrus.FieldLogger, metrics telemetry.Metrics, ds datastore.DataStore, clk clock.Clock, cache *authorizedentries.Cache, pageSize int32, sqlTransactionTimeout time.Duration) (*registrationEntries, error) { resp, err := ds.ListRegistrationEntriesEvents(ctx, &datastore.ListRegistrationEntriesEventsRequest{}) if err != nil { return nil, err @@ -49,6 +51,8 @@ func buildRegistrationEntriesCache(ctx context.Context, log logrus.FieldLogger, firstEventID = event.EventID firstEventTime = now } else { + // After getting the first event, search for any gaps in the event stream, from the first event to the last event. + // During each cache refresh cycle, we will check if any of these missed events get populated. for i := lastEventID + 1; i < event.EventID; i++ { missedEvents[i] = clk.Now() } @@ -92,6 +96,7 @@ func buildRegistrationEntriesCache(ctx context.Context, log logrus.FieldLogger, firstEventID: firstEventID, firstEventTime: firstEventTime, log: log, + metrics: metrics, lastEventID: lastEventID, missedEvents: missedEvents, seenMissedStartupEvents: make(map[uint]struct{}), @@ -102,11 +107,10 @@ func buildRegistrationEntriesCache(ctx context.Context, log logrus.FieldLogger, // updateCache Fetches all the events since the last time this function was running and updates // the cache with all the changes. 
func (a *registrationEntries) updateCache(ctx context.Context) error { + // Process events skipped over previously if err := a.missedStartupEvents(ctx); err != nil { a.log.WithError(err).Error("Unable to process missed startup events") } - - // Process events skipped over previously a.replayMissedEvents(ctx) req := &datastore.ListRegistrationEntriesEventsRequest{ @@ -124,8 +128,8 @@ func (a *registrationEntries) updateCache(ctx context.Context) error { // were skipped over and need to be queued in case they show up later. // This can happen when a long running transaction allocates an event ID but a shorter transaction // comes in after, allocates and commits the ID first. If a read comes in at this moment, the event id for - // the longer running transaction will be skipped over - if !a.firstEventTime.IsZero() && event.EventID != a.lastEventID+1 { + // the longer running transaction will be skipped over. + if !a.firstEventTime.IsZero() { for i := a.lastEventID + 1; i < event.EventID; i++ { a.log.WithField(telemetry.EventID, i).Info("Detected skipped registration entry event") a.mu.Lock() @@ -153,9 +157,21 @@ func (a *registrationEntries) updateCache(ctx context.Context) error { a.lastEventID = event.EventID } + // These two should be the same value but it's valuable to have them both be emitted for incident triage. + server_telemetry.SetNodeAliasesByEntryIDCacheCountGauge(a.metrics, a.cache.Stats().AliasesByEntryID) + server_telemetry.SetNodeAliasesBySelectorCacheCountGauge(a.metrics, a.cache.Stats().AliasesBySelector) + + // These two should be the same value but it's valuable to have them both be emitted for incident triage. + server_telemetry.SetEntriesByEntryIDCacheCountGauge(a.metrics, a.cache.Stats().EntriesByEntryID) + server_telemetry.SetEntriesByParentIDCacheCountGauge(a.metrics, a.cache.Stats().EntriesByParentID) + return nil } +// missedStartupEvents will check for any events come in with an ID less than the first event ID we receive. 
+// For example if the first event ID we receive is 3, this function will check for any IDs less than that. +// If event ID 2 comes in later on, due to a long running transaction, this function will update the cache +// with the information from this event. This function will run until time equal to sqlTransactionTimeout has elapsed after startup. func (a *registrationEntries) missedStartupEvents(ctx context.Context) error { if a.firstEventTime.IsZero() || a.clk.Now().Sub(a.firstEventTime) > a.sqlTransactionTimeout { return nil @@ -195,7 +211,6 @@ func (a *registrationEntries) replayMissedEvents(ctx context.Context) { switch status.Code(err) { case codes.OK: case codes.NotFound: - log.Debug("Event not yet populated in database") continue default: log.WithError(err).Error("Failed to fetch info about missed event") @@ -209,6 +224,7 @@ func (a *registrationEntries) replayMissedEvents(ctx context.Context) { delete(a.missedEvents, eventID) } + server_telemetry.SetSkippedEntryEventIDsCacheCountGauge(a.metrics, len(a.missedEvents)) } // updateCacheEntry update/deletes/creates an individual registration entry in the cache. 
diff --git a/pkg/server/endpoints/authorized_entryfetcher_registration_entries_test.go b/pkg/server/endpoints/authorized_entryfetcher_registration_entries_test.go index 1255048fc12..44b0b531eda 100644 --- a/pkg/server/endpoints/authorized_entryfetcher_registration_entries_test.go +++ b/pkg/server/endpoints/authorized_entryfetcher_registration_entries_test.go @@ -16,6 +16,7 @@ import ( "github.com/spiffe/spire/proto/spire/common" "github.com/spiffe/spire/test/clock" "github.com/spiffe/spire/test/fakes/fakedatastore" + "github.com/spiffe/spire/test/fakes/fakemetrics" "github.com/stretchr/testify/require" ) @@ -70,7 +71,9 @@ func TestBuildRegistrationEntriesCache(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { cache := authorizedentries.NewCache(clk) - registrationEntries, err := buildRegistrationEntriesCache(ctx, log, ds, clk, cache, tt.pageSize, defaultSQLTransactionTimeout) + metrics := fakemetrics.New() + + registrationEntries, err := buildRegistrationEntriesCache(ctx, log, metrics, ds, clk, cache, tt.pageSize, defaultSQLTransactionTimeout) if tt.err != "" { require.ErrorContains(t, err, tt.err) return @@ -104,14 +107,15 @@ func TestRegistrationEntriesCacheMissedEventNotFound(t *testing.T) { clk := clock.NewMock(t) ds := fakedatastore.New(t) cache := authorizedentries.NewCache(clk) + metrics := fakemetrics.New() - registrationEntries, err := buildRegistrationEntriesCache(ctx, log, ds, clk, cache, buildCachePageSize, defaultSQLTransactionTimeout) + registrationEntries, err := buildRegistrationEntriesCache(ctx, log, metrics, ds, clk, cache, buildCachePageSize, defaultSQLTransactionTimeout) require.NoError(t, err) require.NotNil(t, registrationEntries) registrationEntries.missedEvents[1] = clk.Now() registrationEntries.replayMissedEvents(ctx) - require.Equal(t, "Event not yet populated in database", hook.LastEntry().Message) + require.Zero(t, len(hook.Entries)) } func TestRegistrationEntriesSavesMissedStartupEvents(t *testing.T) { @@ -121,6 +125,7 @@ 
func TestRegistrationEntriesSavesMissedStartupEvents(t *testing.T) { clk := clock.NewMock(t) ds := fakedatastore.New(t) cache := authorizedentries.NewCache(clk) + metrics := fakemetrics.New() err := ds.CreateRegistrationEntryEventForTesting(ctx, &datastore.RegistrationEntryEvent{ EventID: 3, @@ -128,7 +133,7 @@ func TestRegistrationEntriesSavesMissedStartupEvents(t *testing.T) { }) require.NoError(t, err) - registrationEntries, err := buildRegistrationEntriesCache(ctx, log, ds, clk, cache, buildCachePageSize, defaultSQLTransactionTimeout) + registrationEntries, err := buildRegistrationEntriesCache(ctx, log, metrics, ds, clk, cache, buildCachePageSize, defaultSQLTransactionTimeout) require.NoError(t, err) require.NotNil(t, registrationEntries) require.Equal(t, uint(3), registrationEntries.firstEventID) diff --git a/pkg/server/endpoints/authorized_entryfetcher_test.go b/pkg/server/endpoints/authorized_entryfetcher_test.go index 4cf74e2f038..761ce966ed4 100644 --- a/pkg/server/endpoints/authorized_entryfetcher_test.go +++ b/pkg/server/endpoints/authorized_entryfetcher_test.go @@ -10,10 +10,12 @@ import ( "github.com/sirupsen/logrus/hooks/test" "github.com/spiffe/go-spiffe/v2/spiffeid" "github.com/spiffe/spire/pkg/common/idutil" + "github.com/spiffe/spire/pkg/common/telemetry" "github.com/spiffe/spire/pkg/server/datastore" "github.com/spiffe/spire/proto/spire/common" "github.com/spiffe/spire/test/clock" "github.com/spiffe/spire/test/fakes/fakedatastore" + "github.com/spiffe/spire/test/fakes/fakemetrics" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -23,8 +25,9 @@ func TestNewAuthorizedEntryFetcherWithEventsBasedCache(t *testing.T) { log, _ := test.NewNullLogger() clk := clock.NewMock(t) ds := fakedatastore.New(t) + metrics := fakemetrics.New() - ef, err := NewAuthorizedEntryFetcherWithEventsBasedCache(ctx, log, clk, ds, defaultCacheReloadInterval, defaultPruneEventsOlderThan, defaultSQLTransactionTimeout) + ef, err := 
NewAuthorizedEntryFetcherWithEventsBasedCache(ctx, log, metrics, clk, ds, defaultCacheReloadInterval, defaultPruneEventsOlderThan, defaultSQLTransactionTimeout) assert.NoError(t, err) assert.NotNil(t, ef) @@ -94,6 +97,21 @@ func TestNewAuthorizedEntryFetcherWithEventsBasedCache(t *testing.T) { entries, err := ef.FetchAuthorizedEntries(ctx, agentID) assert.NoError(t, err) assert.Equal(t, 2, len(entries)) + + // Assert metrics + expectedMetrics := []fakemetrics.MetricItem{ + agentsByIDMetric(1), + agentsByIDExpiresAtMetric(1), + nodeAliasesByEntryIDMetric(1), + nodeAliasesBySelectorMetric(1), + entriesByEntryIDMetric(2), + entriesByParentIDMetric(2), + // Here we have 2 skipped events, one for nodes, one for entries + nodeSkippedEventMetric(0), + entriesSkippedEventMetric(0), + } + + assert.ElementsMatch(t, expectedMetrics, metrics.AllMetrics(), "should emit metrics for node aliases, entries, and agents") } func TestNewAuthorizedEntryFetcherWithEventsBasedCacheErrorBuildingCache(t *testing.T) { @@ -101,13 +119,18 @@ func TestNewAuthorizedEntryFetcherWithEventsBasedCacheErrorBuildingCache(t *test log, _ := test.NewNullLogger() clk := clock.NewMock(t) ds := fakedatastore.New(t) + metrics := fakemetrics.New() buildErr := errors.New("build error") ds.SetNextError(buildErr) - ef, err := NewAuthorizedEntryFetcherWithEventsBasedCache(ctx, log, clk, ds, defaultCacheReloadInterval, defaultPruneEventsOlderThan, defaultSQLTransactionTimeout) + ef, err := NewAuthorizedEntryFetcherWithEventsBasedCache(ctx, log, metrics, clk, ds, defaultCacheReloadInterval, defaultPruneEventsOlderThan, defaultSQLTransactionTimeout) assert.Error(t, err) assert.Nil(t, ef) + + // Assert metrics + expectedMetrics := []fakemetrics.MetricItem{} + assert.ElementsMatch(t, expectedMetrics, metrics.AllMetrics(), "should emit no metrics") } func TestBuildCacheSavesMissedEvents(t *testing.T) { @@ -115,6 +138,7 @@ func TestBuildCacheSavesMissedEvents(t *testing.T) { log, _ := test.NewNullLogger() clk := 
clock.NewMock(t) ds := fakedatastore.New(t) + metrics := fakemetrics.New() // Create Registration Entry Events with a gap err := ds.CreateRegistrationEntryEventForTesting(ctx, &datastore.RegistrationEntryEvent{ @@ -142,7 +166,7 @@ func TestBuildCacheSavesMissedEvents(t *testing.T) { }) require.NoError(t, err) - _, registrationEntries, attestedNodes, err := buildCache(ctx, log, ds, clk, defaultSQLTransactionTimeout) + _, registrationEntries, attestedNodes, err := buildCache(ctx, log, metrics, ds, clk, defaultSQLTransactionTimeout) require.NoError(t, err) require.NotNil(t, registrationEntries) require.NotNil(t, attestedNodes) @@ -153,6 +177,10 @@ func TestBuildCacheSavesMissedEvents(t *testing.T) { assert.Contains(t, attestedNodes.missedEvents, uint(2)) assert.Contains(t, attestedNodes.missedEvents, uint(3)) assert.Equal(t, uint(4), attestedNodes.lastEventID) + + // Assert metrics since the updateCache() method doesn't get called right at built time. + expectedMetrics := []fakemetrics.MetricItem{} + assert.ElementsMatch(t, expectedMetrics, metrics.AllMetrics(), "should emit no metrics") } func TestRunUpdateCacheTaskPrunesExpiredAgents(t *testing.T) { @@ -161,8 +189,9 @@ func TestRunUpdateCacheTaskPrunesExpiredAgents(t *testing.T) { log.SetLevel(logrus.DebugLevel) clk := clock.NewMock(t) ds := fakedatastore.New(t) + metrics := fakemetrics.New() - ef, err := NewAuthorizedEntryFetcherWithEventsBasedCache(ctx, log, clk, ds, defaultCacheReloadInterval, defaultPruneEventsOlderThan, defaultSQLTransactionTimeout) + ef, err := NewAuthorizedEntryFetcherWithEventsBasedCache(ctx, log, metrics, clk, ds, defaultCacheReloadInterval, defaultPruneEventsOlderThan, defaultSQLTransactionTimeout) require.NoError(t, err) require.NotNil(t, ef) @@ -226,8 +255,9 @@ func TestUpdateRegistrationEntriesCacheMissedEvents(t *testing.T) { log, _ := test.NewNullLogger() clk := clock.NewMock(t) ds := fakedatastore.New(t) + metrics := fakemetrics.New() - ef, err := 
NewAuthorizedEntryFetcherWithEventsBasedCache(ctx, log, clk, ds, defaultCacheReloadInterval, defaultPruneEventsOlderThan, defaultSQLTransactionTimeout) + ef, err := NewAuthorizedEntryFetcherWithEventsBasedCache(ctx, log, metrics, clk, ds, defaultCacheReloadInterval, defaultPruneEventsOlderThan, defaultSQLTransactionTimeout) require.NoError(t, err) require.NotNil(t, ef) @@ -309,6 +339,7 @@ func TestUpdateRegistrationEntriesCacheMissedStartupEvents(t *testing.T) { log, _ := test.NewNullLogger() clk := clock.NewMock(t) ds := fakedatastore.New(t) + metrics := fakemetrics.New() agentID := spiffeid.RequireFromString("spiffe://example.org/myagent") @@ -345,7 +376,7 @@ func TestUpdateRegistrationEntriesCacheMissedStartupEvents(t *testing.T) { require.NoError(t, err) // Create entry fetcher - ef, err := NewAuthorizedEntryFetcherWithEventsBasedCache(ctx, log, clk, ds, defaultCacheReloadInterval, defaultPruneEventsOlderThan, defaultSQLTransactionTimeout) + ef, err := NewAuthorizedEntryFetcherWithEventsBasedCache(ctx, log, metrics, clk, ds, defaultCacheReloadInterval, defaultPruneEventsOlderThan, defaultSQLTransactionTimeout) require.NoError(t, err) require.NotNil(t, ef) @@ -415,8 +446,9 @@ func TestUpdateAttestedNodesCacheMissedEvents(t *testing.T) { log, _ := test.NewNullLogger() clk := clock.NewMock(t) ds := fakedatastore.New(t) + metrics := fakemetrics.New() - ef, err := NewAuthorizedEntryFetcherWithEventsBasedCache(ctx, log, clk, ds, defaultCacheReloadInterval, defaultPruneEventsOlderThan, defaultSQLTransactionTimeout) + ef, err := NewAuthorizedEntryFetcherWithEventsBasedCache(ctx, log, metrics, clk, ds, defaultCacheReloadInterval, defaultPruneEventsOlderThan, defaultSQLTransactionTimeout) require.NoError(t, err) require.NotNil(t, ef) @@ -531,6 +563,7 @@ func TestUpdateAttestedNodesCacheMissedStartupEvents(t *testing.T) { log, _ := test.NewNullLogger() clk := clock.NewMock(t) ds := fakedatastore.New(t) + metrics := fakemetrics.New() agent1 := 
spiffeid.RequireFromString("spiffe://example.org/myagent1") agent2 := spiffeid.RequireFromString("spiffe://example.org/myagent2") @@ -594,7 +627,7 @@ func TestUpdateAttestedNodesCacheMissedStartupEvents(t *testing.T) { require.NoError(t, err) // Create entry fetcher - ef, err := NewAuthorizedEntryFetcherWithEventsBasedCache(ctx, log, clk, ds, defaultCacheReloadInterval, defaultPruneEventsOlderThan, defaultSQLTransactionTimeout) + ef, err := NewAuthorizedEntryFetcherWithEventsBasedCache(ctx, log, metrics, clk, ds, defaultCacheReloadInterval, defaultPruneEventsOlderThan, defaultSQLTransactionTimeout) require.NoError(t, err) require.NotNil(t, ef) @@ -652,3 +685,75 @@ func TestUpdateAttestedNodesCacheMissedStartupEvents(t *testing.T) { require.Equal(t, entry.EntryId, entries[0].Id) require.Equal(t, entry.SpiffeId, idutil.RequireIDProtoString(entries[0].SpiffeId)) } + +// AgentsByIDCacheCount +func agentsByIDMetric(val float32) fakemetrics.MetricItem { + return fakemetrics.MetricItem{ + Type: fakemetrics.SetGaugeType, + Key: []string{telemetry.Node, telemetry.AgentsByIDCache, telemetry.Count}, + Val: val, + Labels: nil} +} + +func agentsByIDExpiresAtMetric(val float32) fakemetrics.MetricItem { + return fakemetrics.MetricItem{ + Type: fakemetrics.SetGaugeType, + Key: []string{telemetry.Node, telemetry.AgentsByExpiresAtCache, telemetry.Count}, + Val: val, + Labels: nil, + } +} + +func nodeAliasesByEntryIDMetric(val float32) fakemetrics.MetricItem { + return fakemetrics.MetricItem{ + Type: fakemetrics.SetGaugeType, + Key: []string{telemetry.Entry, telemetry.NodeAliasesByEntryIDCache, telemetry.Count}, + Val: val, + Labels: nil, + } +} + +func nodeSkippedEventMetric(val float32) fakemetrics.MetricItem { + return fakemetrics.MetricItem{ + Type: fakemetrics.SetGaugeType, + Key: []string{telemetry.Node, telemetry.SkippedNodeEventIDs, telemetry.Count}, + Val: val, + Labels: nil, + } +} + +func nodeAliasesBySelectorMetric(val float32) fakemetrics.MetricItem { + return 
fakemetrics.MetricItem{ + Type: fakemetrics.SetGaugeType, + Key: []string{telemetry.Entry, telemetry.NodeAliasesBySelectorCache, telemetry.Count}, + Val: val, + Labels: nil, + } +} + +func entriesByEntryIDMetric(val float32) fakemetrics.MetricItem { + return fakemetrics.MetricItem{ + Type: fakemetrics.SetGaugeType, + Key: []string{telemetry.Entry, telemetry.EntriesByEntryIDCache, telemetry.Count}, + Val: val, + Labels: nil, + } +} + +func entriesByParentIDMetric(val float32) fakemetrics.MetricItem { + return fakemetrics.MetricItem{ + Type: fakemetrics.SetGaugeType, + Key: []string{telemetry.Entry, telemetry.EntriesByParentIDCache, telemetry.Count}, + Val: val, + Labels: nil, + } +} + +func entriesSkippedEventMetric(val float32) fakemetrics.MetricItem { + return fakemetrics.MetricItem{ + Type: fakemetrics.SetGaugeType, + Key: []string{telemetry.Entry, telemetry.SkippedEntryEventIDs, telemetry.Count}, + Val: val, + Labels: nil, + } +} diff --git a/pkg/server/endpoints/config.go b/pkg/server/endpoints/config.go index 33b4d747ebf..eaa739f577f 100644 --- a/pkg/server/endpoints/config.go +++ b/pkg/server/endpoints/config.go @@ -20,6 +20,7 @@ import ( debugv1 "github.com/spiffe/spire/pkg/server/api/debug/v1" entryv1 "github.com/spiffe/spire/pkg/server/api/entry/v1" healthv1 "github.com/spiffe/spire/pkg/server/api/health/v1" + localauthorityv1 "github.com/spiffe/spire/pkg/server/api/localauthority/v1" loggerv1 "github.com/spiffe/spire/pkg/server/api/logger/v1" svidv1 "github.com/spiffe/spire/pkg/server/api/svid/v1" trustdomainv1 "github.com/spiffe/spire/pkg/server/api/trustdomain/v1" @@ -56,8 +57,8 @@ type Config struct { // Bundle endpoint configuration BundleEndpoint bundle.EndpointConfig - // JWTKey publisher - JWTKeyPublisher manager.JwtKeyPublisher + // Authority manager + AuthorityManager manager.AuthorityManager // Makes policy decisions AuthPolicyEngine *authpolicy.Engine @@ -154,7 +155,7 @@ func (c *Config) maybeMakeBundleEndpointServer() (Server, 
func(context.Context) func (c *Config) makeAPIServers(entryFetcher api.AuthorizedEntryFetcher) APIServers { ds := c.Catalog.GetDataStore() - upstreamPublisher := UpstreamPublisher(c.JWTKeyPublisher) + upstreamPublisher := UpstreamPublisher(c.AuthorityManager) return APIServers{ AgentServer: agentv1.New(agentv1.Config{ @@ -200,5 +201,10 @@ func (c *Config) makeAPIServers(entryFetcher api.AuthorizedEntryFetcher) APIServ DataStore: ds, BundleRefresher: c.BundleManager, }), + LocalAUthorityServer: localauthorityv1.New(localauthorityv1.Config{ + TrustDomain: c.TrustDomain, + CAManager: c.AuthorityManager, + DataStore: ds, + }), } } diff --git a/pkg/server/endpoints/endpoints.go b/pkg/server/endpoints/endpoints.go index caf6f017f16..af2db4f908c 100644 --- a/pkg/server/endpoints/endpoints.go +++ b/pkg/server/endpoints/endpoints.go @@ -25,10 +25,12 @@ import ( bundlev1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/bundle/v1" debugv1_pb "github.com/spiffe/spire-api-sdk/proto/spire/api/server/debug/v1" entryv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" loggerv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/logger/v1" svidv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/svid/v1" trustdomainv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/trustdomain/v1" "github.com/spiffe/spire/pkg/common/auth" + "github.com/spiffe/spire/pkg/common/fflag" "github.com/spiffe/spire/pkg/common/peertracker" "github.com/spiffe/spire/pkg/common/telemetry" "github.com/spiffe/spire/pkg/common/util" @@ -86,14 +88,15 @@ type Endpoints struct { } type APIServers struct { - AgentServer agentv1.AgentServer - BundleServer bundlev1.BundleServer - DebugServer debugv1_pb.DebugServer - EntryServer entryv1.EntryServer - HealthServer grpc_health_v1.HealthServer - LoggerServer loggerv1.LoggerServer - SVIDServer svidv1.SVIDServer - TrustDomainServer 
trustdomainv1.TrustDomainServer + AgentServer agentv1.AgentServer + BundleServer bundlev1.BundleServer + DebugServer debugv1_pb.DebugServer + EntryServer entryv1.EntryServer + HealthServer grpc_health_v1.HealthServer + LoggerServer loggerv1.LoggerServer + SVIDServer svidv1.SVIDServer + TrustDomainServer trustdomainv1.TrustDomainServer + LocalAUthorityServer localauthorityv1.LocalAuthorityServer } // RateLimitConfig holds rate limiting configurations. @@ -131,7 +134,7 @@ func New(ctx context.Context, c Config) (*Endpoints, error) { var ef api.AuthorizedEntryFetcher var cacheRebuildTask, pruneEventsTask func(context.Context) error if c.EventsBasedCache { - efEventsBasedCache, err := NewAuthorizedEntryFetcherWithEventsBasedCache(ctx, c.Log, c.Clock, ds, c.CacheReloadInterval, c.PruneEventsOlderThan, c.SQLTransactionTimeout) + efEventsBasedCache, err := NewAuthorizedEntryFetcherWithEventsBasedCache(ctx, c.Log, c.Metrics, c.Clock, ds, c.CacheReloadInterval, c.PruneEventsOlderThan, c.SQLTransactionTimeout) if err != nil { return nil, err } @@ -200,6 +203,11 @@ func (e *Endpoints) ListenAndServe(ctx context.Context) error { trustdomainv1.RegisterTrustDomainServer(tcpServer, e.APIServers.TrustDomainServer) trustdomainv1.RegisterTrustDomainServer(udsServer, e.APIServers.TrustDomainServer) + if fflag.IsSet(fflag.FlagForcedRotation) { + localauthorityv1.RegisterLocalAuthorityServer(tcpServer, e.APIServers.LocalAUthorityServer) + localauthorityv1.RegisterLocalAuthorityServer(udsServer, e.APIServers.LocalAUthorityServer) + } + // UDS only loggerv1.RegisterLoggerServer(udsServer, e.APIServers.LoggerServer) grpc_health_v1.RegisterHealthServer(udsServer, e.APIServers.HealthServer) diff --git a/pkg/server/endpoints/endpoints_test.go b/pkg/server/endpoints/endpoints_test.go index 883e89b33b0..a6b6823fb13 100644 --- a/pkg/server/endpoints/endpoints_test.go +++ b/pkg/server/endpoints/endpoints_test.go @@ -19,10 +19,12 @@ import ( bundlev1 
"github.com/spiffe/spire-api-sdk/proto/spire/api/server/bundle/v1" debugv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/debug/v1" entryv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1" + localauthorityv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/localauthority/v1" loggerv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/logger/v1" svidv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/svid/v1" trustdomainv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/trustdomain/v1" "github.com/spiffe/spire-api-sdk/proto/spire/api/types" + "github.com/spiffe/spire/pkg/common/fflag" "github.com/spiffe/spire/pkg/common/util" "github.com/spiffe/spire/pkg/server/authpolicy" "github.com/spiffe/spire/pkg/server/ca/manager" @@ -94,7 +96,7 @@ func TestNew(t *testing.T) { Catalog: cat, ServerCA: serverCA, BundleEndpoint: bundle.EndpointConfig{Address: tcpAddr}, - JWTKeyPublisher: &fakeJWTKeyPublisher{}, + AuthorityManager: &fakeAuthorityManager{}, Log: log, RootLog: log, Metrics: metrics, @@ -115,6 +117,7 @@ func TestNew(t *testing.T) { assert.NotNil(t, endpoints.APIServers.LoggerServer) assert.NotNil(t, endpoints.APIServers.SVIDServer) assert.NotNil(t, endpoints.BundleEndpointServer) + assert.NotNil(t, endpoints.APIServers.LocalAUthorityServer) assert.NotNil(t, endpoints.EntryFetcherPruneEventsTask) assert.Equal(t, cat.GetDataStore(), endpoints.DataStore) assert.Equal(t, log, endpoints.Log) @@ -151,7 +154,6 @@ func TestNewErrorCreatingAuthorizedEntryFetcher(t *testing.T) { Catalog: cat, ServerCA: serverCA, BundleEndpoint: bundle.EndpointConfig{Address: tcpAddr}, - JWTKeyPublisher: &fakeJWTKeyPublisher{}, Log: log, Metrics: metrics, RateLimit: rateLimit, @@ -164,6 +166,8 @@ func TestNewErrorCreatingAuthorizedEntryFetcher(t *testing.T) { } func TestListenAndServe(t *testing.T) { + require.NoError(t, fflag.Load(fflag.RawConfig{"forced_rotation"})) + ctx := context.Background() ca := testca.New(t, testTD) federatedCA := 
testca.New(t, foreignFederatedTD) @@ -206,14 +210,15 @@ func TestListenAndServe(t *testing.T) { DataStore: ds, BundleCache: bundle.NewCache(ds, clk), APIServers: APIServers{ - AgentServer: agentServer{}, - BundleServer: bundleServer{}, - DebugServer: debugServer{}, - EntryServer: entryServer{}, - HealthServer: healthServer{}, - LoggerServer: loggerServer{}, - SVIDServer: svidServer{}, - TrustDomainServer: trustDomainServer{}, + AgentServer: agentServer{}, + BundleServer: bundleServer{}, + DebugServer: debugServer{}, + EntryServer: entryServer{}, + HealthServer: healthServer{}, + LoggerServer: loggerServer{}, + SVIDServer: svidServer{}, + TrustDomainServer: trustDomainServer{}, + LocalAUthorityServer: localAuthorityServer{}, }, BundleEndpointServer: bundleEndpointServer, Log: log, @@ -321,6 +326,10 @@ func TestListenAndServe(t *testing.T) { testTrustDomainAPI(ctx, t, conns) }) + t.Run("LocalAuthority", func(t *testing.T) { + testLocalAuthorityAPI(ctx, t, conns) + }) + t.Run("Access denied to remote caller", func(t *testing.T) { testRemoteCaller(ctx, t, target) }) @@ -885,6 +894,110 @@ func testTrustDomainAPI(ctx context.Context, t *testing.T, conns testConns) { }) } +func testLocalAuthorityAPI(ctx context.Context, t *testing.T, conns testConns) { + t.Run("Local", func(t *testing.T) { + testAuthorization(ctx, t, localauthorityv1.NewLocalAuthorityClient(conns.local), map[string]bool{ + "GetJWTAuthorityState": true, + "PrepareJWTAuthority": true, + "ActivateJWTAuthority": true, + "TaintJWTAuthority": true, + "RevokeJWTAuthority": true, + "GetX509AuthorityState": true, + "PrepareX509Authority": true, + "ActivateX509Authority": true, + "TaintX509Authority": true, + "TaintX509UpstreamAuthority": true, + "RevokeX509Authority": true, + "RevokeX509UpstreamAuthority": true, + }) + }) + + t.Run("NoAuth", func(t *testing.T) { + testAuthorization(ctx, t, localauthorityv1.NewLocalAuthorityClient(conns.noAuth), map[string]bool{ + "GetJWTAuthorityState": false, + 
"PrepareJWTAuthority": false, + "ActivateJWTAuthority": false, + "TaintJWTAuthority": false, + "RevokeJWTAuthority": false, + "GetX509AuthorityState": false, + "PrepareX509Authority": false, + "ActivateX509Authority": false, + "TaintX509Authority": false, + "TaintX509UpstreamAuthority": false, + "RevokeX509Authority": false, + "RevokeX509UpstreamAuthority": false, + }) + }) + + t.Run("Agent", func(t *testing.T) { + testAuthorization(ctx, t, localauthorityv1.NewLocalAuthorityClient(conns.agent), map[string]bool{ + "GetJWTAuthorityState": false, + "PrepareJWTAuthority": false, + "ActivateJWTAuthority": false, + "TaintJWTAuthority": false, + "RevokeJWTAuthority": false, + "GetX509AuthorityState": false, + "PrepareX509Authority": false, + "ActivateX509Authority": false, + "TaintX509Authority": false, + "TaintX509UpstreamAuthority": false, + "RevokeX509Authority": false, + "RevokeX509UpstreamAuthority": false, + }) + }) + + t.Run("Admin", func(t *testing.T) { + testAuthorization(ctx, t, localauthorityv1.NewLocalAuthorityClient(conns.admin), map[string]bool{ + "GetJWTAuthorityState": true, + "PrepareJWTAuthority": true, + "ActivateJWTAuthority": true, + "TaintJWTAuthority": true, + "RevokeJWTAuthority": true, + "GetX509AuthorityState": true, + "PrepareX509Authority": true, + "ActivateX509Authority": true, + "TaintX509Authority": true, + "TaintX509UpstreamAuthority": true, + "RevokeX509Authority": true, + "RevokeX509UpstreamAuthority": true, + }) + }) + + t.Run("Federated Admin", func(t *testing.T) { + testAuthorization(ctx, t, localauthorityv1.NewLocalAuthorityClient(conns.federatedAdmin), map[string]bool{ + "GetJWTAuthorityState": true, + "PrepareJWTAuthority": true, + "ActivateJWTAuthority": true, + "TaintJWTAuthority": true, + "RevokeJWTAuthority": true, + "GetX509AuthorityState": true, + "PrepareX509Authority": true, + "ActivateX509Authority": true, + "TaintX509Authority": true, + "TaintX509UpstreamAuthority": true, + "RevokeX509Authority": true, + 
"RevokeX509UpstreamAuthority": true, + }) + }) + + t.Run("Downstream", func(t *testing.T) { + testAuthorization(ctx, t, localauthorityv1.NewLocalAuthorityClient(conns.downstream), map[string]bool{ + "GetJWTAuthorityState": false, + "PrepareJWTAuthority": false, + "ActivateJWTAuthority": false, + "TaintJWTAuthority": false, + "RevokeJWTAuthority": false, + "GetX509AuthorityState": false, + "PrepareX509Authority": false, + "ActivateX509Authority": false, + "TaintX509Authority": false, + "TaintX509UpstreamAuthority": false, + "RevokeX509Authority": false, + "RevokeX509UpstreamAuthority": false, + }) + }) +} + // testAuthorization issues an RPC for each method on the client interface and // asserts whether the RPC was authorized or not. If a method is not // represented in the expectedAuthResults, or a method in expectedAuthResults @@ -1039,8 +1152,8 @@ func (o *svidObserver) State() svid.State { } } -type fakeJWTKeyPublisher struct { - manager.JwtKeyPublisher +type fakeAuthorityManager struct { + manager.AuthorityManager } type agentServer struct { @@ -1252,3 +1365,55 @@ func (trustDomainServer) BatchDeleteFederationRelationship(_ context.Context, _ func (trustDomainServer) RefreshBundle(_ context.Context, _ *trustdomainv1.RefreshBundleRequest) (*emptypb.Empty, error) { return &emptypb.Empty{}, nil } + +type localAuthorityServer struct { + localauthorityv1.UnsafeLocalAuthorityServer +} + +func (localAuthorityServer) GetJWTAuthorityState(context.Context, *localauthorityv1.GetJWTAuthorityStateRequest) (*localauthorityv1.GetJWTAuthorityStateResponse, error) { + return &localauthorityv1.GetJWTAuthorityStateResponse{}, nil +} + +func (localAuthorityServer) PrepareJWTAuthority(context.Context, *localauthorityv1.PrepareJWTAuthorityRequest) (*localauthorityv1.PrepareJWTAuthorityResponse, error) { + return &localauthorityv1.PrepareJWTAuthorityResponse{}, nil +} + +func (localAuthorityServer) ActivateJWTAuthority(context.Context, *localauthorityv1.ActivateJWTAuthorityRequest) 
(*localauthorityv1.ActivateJWTAuthorityResponse, error) { + return &localauthorityv1.ActivateJWTAuthorityResponse{}, nil +} + +func (localAuthorityServer) TaintJWTAuthority(context.Context, *localauthorityv1.TaintJWTAuthorityRequest) (*localauthorityv1.TaintJWTAuthorityResponse, error) { + return &localauthorityv1.TaintJWTAuthorityResponse{}, nil +} + +func (localAuthorityServer) RevokeJWTAuthority(context.Context, *localauthorityv1.RevokeJWTAuthorityRequest) (*localauthorityv1.RevokeJWTAuthorityResponse, error) { + return &localauthorityv1.RevokeJWTAuthorityResponse{}, nil +} + +func (localAuthorityServer) GetX509AuthorityState(context.Context, *localauthorityv1.GetX509AuthorityStateRequest) (*localauthorityv1.GetX509AuthorityStateResponse, error) { + return &localauthorityv1.GetX509AuthorityStateResponse{}, nil +} + +func (localAuthorityServer) PrepareX509Authority(context.Context, *localauthorityv1.PrepareX509AuthorityRequest) (*localauthorityv1.PrepareX509AuthorityResponse, error) { + return &localauthorityv1.PrepareX509AuthorityResponse{}, nil +} + +func (localAuthorityServer) ActivateX509Authority(context.Context, *localauthorityv1.ActivateX509AuthorityRequest) (*localauthorityv1.ActivateX509AuthorityResponse, error) { + return &localauthorityv1.ActivateX509AuthorityResponse{}, nil +} + +func (localAuthorityServer) TaintX509Authority(context.Context, *localauthorityv1.TaintX509AuthorityRequest) (*localauthorityv1.TaintX509AuthorityResponse, error) { + return &localauthorityv1.TaintX509AuthorityResponse{}, nil +} + +func (localAuthorityServer) TaintX509UpstreamAuthority(context.Context, *localauthorityv1.TaintX509UpstreamAuthorityRequest) (*localauthorityv1.TaintX509UpstreamAuthorityResponse, error) { + return &localauthorityv1.TaintX509UpstreamAuthorityResponse{}, nil +} + +func (localAuthorityServer) RevokeX509Authority(context.Context, *localauthorityv1.RevokeX509AuthorityRequest) (*localauthorityv1.RevokeX509AuthorityResponse, error) { + return 
&localauthorityv1.RevokeX509AuthorityResponse{}, nil +} + +func (localAuthorityServer) RevokeX509UpstreamAuthority(context.Context, *localauthorityv1.RevokeX509UpstreamAuthorityRequest) (*localauthorityv1.RevokeX509UpstreamAuthorityResponse, error) { + return &localauthorityv1.RevokeX509UpstreamAuthorityResponse{}, nil +} diff --git a/pkg/server/endpoints/middleware.go b/pkg/server/endpoints/middleware.go index c8df56389d5..9a6d5a6c950 100644 --- a/pkg/server/endpoints/middleware.go +++ b/pkg/server/endpoints/middleware.go @@ -170,6 +170,18 @@ func RateLimits(config RateLimitConfig) map[string]api.RateLimiter { "/spire.api.server.trustdomain.v1.TrustDomain/BatchUpdateFederationRelationship": noLimit, "/spire.api.server.trustdomain.v1.TrustDomain/BatchDeleteFederationRelationship": noLimit, "/spire.api.server.trustdomain.v1.TrustDomain/RefreshBundle": noLimit, + "/spire.api.server.localauthority.v1.LocalAuthority/GetJWTAuthorityState": noLimit, + "/spire.api.server.localauthority.v1.LocalAuthority/PrepareJWTAuthority": noLimit, + "/spire.api.server.localauthority.v1.LocalAuthority/ActivateJWTAuthority": noLimit, + "/spire.api.server.localauthority.v1.LocalAuthority/TaintJWTAuthority": noLimit, + "/spire.api.server.localauthority.v1.LocalAuthority/RevokeJWTAuthority": noLimit, + "/spire.api.server.localauthority.v1.LocalAuthority/GetX509AuthorityState": noLimit, + "/spire.api.server.localauthority.v1.LocalAuthority/PrepareX509Authority": noLimit, + "/spire.api.server.localauthority.v1.LocalAuthority/ActivateX509Authority": noLimit, + "/spire.api.server.localauthority.v1.LocalAuthority/TaintX509Authority": noLimit, + "/spire.api.server.localauthority.v1.LocalAuthority/TaintX509UpstreamAuthority": noLimit, + "/spire.api.server.localauthority.v1.LocalAuthority/RevokeX509Authority": noLimit, + "/spire.api.server.localauthority.v1.LocalAuthority/RevokeX509UpstreamAuthority": noLimit, "/grpc.health.v1.Health/Check": noLimit, "/grpc.health.v1.Health/Watch": noLimit, } diff 
--git a/pkg/server/plugin/keymanager/awskms/awskms.go b/pkg/server/plugin/keymanager/awskms/awskms.go index 70d9e8b33bf..f2a51e37fe8 100644 --- a/pkg/server/plugin/keymanager/awskms/awskms.go +++ b/pkg/server/plugin/keymanager/awskms/awskms.go @@ -317,10 +317,10 @@ func (p *Plugin) createKey(ctx context.Context, spireKeyID string, keyType keyma } createKeyInput := &kms.CreateKeyInput{ - Description: aws.String(description), - KeyUsage: types.KeyUsageTypeSignVerify, - CustomerMasterKeySpec: keySpec, - Policy: p.keyPolicy, + Description: aws.String(description), + KeyUsage: types.KeyUsageTypeSignVerify, + KeySpec: keySpec, + Policy: p.keyPolicy, } key, err := p.kmsClient.CreateKey(ctx, createKeyInput) @@ -495,7 +495,7 @@ func (p *Plugin) refreshAliases(ctx context.Context) error { } if errs != nil { - return fmt.Errorf(strings.Join(errs, ": ")) + return errors.New(strings.Join(errs, ": ")) } return nil } @@ -594,7 +594,7 @@ func (p *Plugin) disposeAliases(ctx context.Context) error { } if errs != nil { - return fmt.Errorf(strings.Join(errs, ": ")) + return errors.New(strings.Join(errs, ": ")) } return nil @@ -700,7 +700,7 @@ func (p *Plugin) disposeKeys(ctx context.Context) error { } } if errs != nil { - return fmt.Errorf(strings.Join(errs, ": ")) + return errors.New(strings.Join(errs, ": ")) } return nil @@ -908,31 +908,31 @@ func signingAlgorithmForKMS(keyType keymanagerv1.KeyType, signerOpts any) (types } } -func keyTypeFromKeySpec(keySpec types.CustomerMasterKeySpec) (keymanagerv1.KeyType, bool) { +func keyTypeFromKeySpec(keySpec types.KeySpec) (keymanagerv1.KeyType, bool) { switch keySpec { - case types.CustomerMasterKeySpecRsa2048: + case types.KeySpecRsa2048: return keymanagerv1.KeyType_RSA_2048, true - case types.CustomerMasterKeySpecRsa4096: + case types.KeySpecRsa4096: return keymanagerv1.KeyType_RSA_4096, true - case types.CustomerMasterKeySpecEccNistP256: + case types.KeySpecEccNistP256: return keymanagerv1.KeyType_EC_P256, true - case 
types.CustomerMasterKeySpecEccNistP384: + case types.KeySpecEccNistP384: return keymanagerv1.KeyType_EC_P384, true default: return keymanagerv1.KeyType_UNSPECIFIED_KEY_TYPE, false } } -func keySpecFromKeyType(keyType keymanagerv1.KeyType) (types.CustomerMasterKeySpec, bool) { +func keySpecFromKeyType(keyType keymanagerv1.KeyType) (types.KeySpec, bool) { switch keyType { case keymanagerv1.KeyType_RSA_2048: - return types.CustomerMasterKeySpecRsa2048, true + return types.KeySpecRsa2048, true case keymanagerv1.KeyType_RSA_4096: - return types.CustomerMasterKeySpecRsa4096, true + return types.KeySpecRsa4096, true case keymanagerv1.KeyType_EC_P256: - return types.CustomerMasterKeySpecEccNistP256, true + return types.KeySpecEccNistP256, true case keymanagerv1.KeyType_EC_P384: - return types.CustomerMasterKeySpecEccNistP384, true + return types.KeySpecEccNistP384, true default: return "", false } diff --git a/pkg/server/plugin/keymanager/awskms/awskms_test.go b/pkg/server/plugin/keymanager/awskms/awskms_test.go index 4145360d74e..c6891560927 100644 --- a/pkg/server/plugin/keymanager/awskms/awskms_test.go +++ b/pkg/server/plugin/keymanager/awskms/awskms_test.go @@ -164,42 +164,42 @@ func TestConfigure(t *testing.T) { { AliasName: aws.String(aliasName), KeyID: aws.String(keyID), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), }, { AliasName: aws.String(aliasName + "01"), KeyID: aws.String(keyID + "01"), - KeySpec: types.CustomerMasterKeySpecRsa2048, + KeySpec: types.KeySpecRsa2048, Enabled: true, PublicKey: []byte("foo"), }, { AliasName: aws.String(aliasName + "02"), KeyID: aws.String(keyID + "02"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), }, { AliasName: aws.String(aliasName + "03"), KeyID: aws.String(keyID + "03"), - KeySpec: types.CustomerMasterKeySpecEccNistP256, + KeySpec: types.KeySpecEccNistP256, Enabled: true, 
PublicKey: []byte("foo"), }, { AliasName: aws.String(aliasName + "04"), KeyID: aws.String(keyID + "04"), - KeySpec: types.CustomerMasterKeySpecEccNistP384, + KeySpec: types.KeySpecEccNistP384, Enabled: true, PublicKey: []byte("foo"), }, { AliasName: aws.String("alias/SPIRE_SERVER/wrong_prefix"), KeyID: aws.String("foo_id"), - KeySpec: types.CustomerMasterKeySpecEccNistP384, + KeySpec: types.KeySpecEccNistP384, Enabled: true, PublicKey: []byte("foo"), }, @@ -297,7 +297,7 @@ func TestConfigure(t *testing.T) { { AliasName: aws.String(aliasName), KeyID: aws.String(keyID), - KeySpec: types.CustomerMasterKeySpecRsa2048, + KeySpec: types.KeySpecRsa2048, Enabled: true, PublicKey: []byte("foo"), }, @@ -328,7 +328,7 @@ func TestConfigure(t *testing.T) { { AliasName: aws.String(aliasName), KeyID: aws.String(keyID), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), }, @@ -345,7 +345,7 @@ func TestConfigure(t *testing.T) { { AliasName: aws.String(aliasName), KeyID: aws.String(keyID), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: false, PublicKey: []byte("foo"), }, @@ -440,7 +440,7 @@ func TestGenerateKey(t *testing.T) { { AliasName: aws.String(aliasName), KeyID: aws.String(keyID), - KeySpec: types.CustomerMasterKeySpecEccNistP256, + KeySpec: types.KeySpecEccNistP256, Enabled: true, PublicKey: []byte("foo"), AliasLastUpdatedDate: &unixEpoch, @@ -467,7 +467,7 @@ func TestGenerateKey(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/test_example_org/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee/bundle-acme-foo_2ebar_2brsa"), KeyID: aws.String(keyID), - KeySpec: types.CustomerMasterKeySpecEccNistP256, + KeySpec: types.KeySpecEccNistP256, Enabled: true, PublicKey: []byte("foo"), AliasLastUpdatedDate: &unixEpoch, @@ -556,7 +556,7 @@ func TestGenerateKey(t *testing.T) { { AliasName: aws.String(aliasName), KeyID: aws.String(keyID), - KeySpec: 
types.CustomerMasterKeySpecEccNistP256, + KeySpec: types.KeySpecEccNistP256, Enabled: true, PublicKey: []byte("foo"), }, @@ -583,7 +583,7 @@ func TestGenerateKey(t *testing.T) { { AliasName: aws.String(aliasName), KeyID: aws.String(keyID), - KeySpec: types.CustomerMasterKeySpecEccNistP256, + KeySpec: types.KeySpecEccNistP256, Enabled: true, PublicKey: []byte("foo"), }, @@ -611,7 +611,7 @@ func TestGenerateKey(t *testing.T) { { AliasName: aws.String(aliasName), KeyID: aws.String(keyID), - KeySpec: types.CustomerMasterKeySpecEccNistP256, + KeySpec: types.KeySpecEccNistP256, Enabled: true, PublicKey: []byte("foo"), }, @@ -639,7 +639,7 @@ func TestGenerateKey(t *testing.T) { { AliasName: aws.String(aliasName), KeyID: aws.String(keyID), - KeySpec: types.CustomerMasterKeySpecEccNistP256, + KeySpec: types.KeySpecEccNistP256, Enabled: true, PublicKey: []byte("foo"), }, @@ -667,7 +667,7 @@ func TestGenerateKey(t *testing.T) { { AliasName: aws.String(aliasName), KeyID: aws.String(keyID), - KeySpec: types.CustomerMasterKeySpecEccNistP256, + KeySpec: types.KeySpecEccNistP256, Enabled: true, PublicKey: []byte("foo"), }, @@ -1095,7 +1095,7 @@ func TestGetPublicKey(t *testing.T) { { AliasName: aws.String(aliasName), KeyID: aws.String(keyID), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), }, @@ -1108,7 +1108,7 @@ func TestGetPublicKey(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/test_example_org/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee/bundle-acme-foo_2ebar_2brsa"), KeyID: aws.String(keyID), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), }, @@ -1161,7 +1161,7 @@ func TestGetPublicKeys(t *testing.T) { { AliasName: aws.String(aliasName), KeyID: aws.String(keyID), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), }, @@ -1213,7 +1213,7 @@ func 
TestRefreshAliases(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/test_example_org/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee/id_01"), KeyID: aws.String("key_id_01"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1228,7 +1228,7 @@ func TestRefreshAliases(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/test_example_org/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee/id_01"), KeyID: aws.String("key_id_01"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1237,7 +1237,7 @@ func TestRefreshAliases(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/test_example_org/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee/id_02"), KeyID: aws.String("key_id_02"), - KeySpec: types.CustomerMasterKeySpecRsa2048, + KeySpec: types.KeySpecRsa2048, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1246,7 +1246,7 @@ func TestRefreshAliases(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/test_example_org/another_server_id/id_03"), KeyID: aws.String("key_id_03"), - KeySpec: types.CustomerMasterKeySpecEccNistP384, + KeySpec: types.KeySpecEccNistP384, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1255,7 +1255,7 @@ func TestRefreshAliases(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/another_td/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee/id_04"), KeyID: aws.String("key_id_04"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1264,7 +1264,7 @@ func TestRefreshAliases(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/another_td/another_server_id/id_05"), KeyID: aws.String("key_id_05"), - KeySpec: types.CustomerMasterKeySpecEccNistP384, + KeySpec: types.KeySpecEccNistP384, Enabled: true, PublicKey: 
[]byte("foo"), CreationDate: &unixEpoch, @@ -1273,7 +1273,7 @@ func TestRefreshAliases(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/unrelated"), KeyID: aws.String("key_id_06"), - KeySpec: types.CustomerMasterKeySpecEccNistP384, + KeySpec: types.KeySpecEccNistP384, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1282,7 +1282,7 @@ func TestRefreshAliases(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/unrelated/unrelated/id_07"), KeyID: aws.String("key_id_07"), - KeySpec: types.CustomerMasterKeySpecEccNistP384, + KeySpec: types.KeySpecEccNistP384, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1291,7 +1291,7 @@ func TestRefreshAliases(t *testing.T) { { AliasName: nil, KeyID: aws.String("key_id_08"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1412,7 +1412,7 @@ func TestDisposeAliases(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/test_example_org/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee/id_01"), KeyID: aws.String("key_id_01"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1421,7 +1421,7 @@ func TestDisposeAliases(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/test_example_org/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee/id_02"), KeyID: aws.String("key_id_02"), - KeySpec: types.CustomerMasterKeySpecRsa2048, + KeySpec: types.KeySpecRsa2048, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1430,7 +1430,7 @@ func TestDisposeAliases(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/test_example_org/another_server_id/id_03"), KeyID: aws.String("key_id_03"), - KeySpec: types.CustomerMasterKeySpecEccNistP384, + KeySpec: types.KeySpecEccNistP384, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1439,7 +1439,7 @@ func 
TestDisposeAliases(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/another_td/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee/id_04"), KeyID: aws.String("key_id_04"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1448,7 +1448,7 @@ func TestDisposeAliases(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/another_td/another_server/id_05"), KeyID: aws.String("key_id_05"), - KeySpec: types.CustomerMasterKeySpecEccNistP256, + KeySpec: types.KeySpecEccNistP256, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1457,7 +1457,7 @@ func TestDisposeAliases(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/unrelated"), KeyID: aws.String("key_id_06"), - KeySpec: types.CustomerMasterKeySpecEccNistP256, + KeySpec: types.KeySpecEccNistP256, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1466,7 +1466,7 @@ func TestDisposeAliases(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/unrelated/unrelated/id_07"), KeyID: aws.String("key_id_07"), - KeySpec: types.CustomerMasterKeySpecEccNistP256, + KeySpec: types.KeySpecEccNistP256, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1475,7 +1475,7 @@ func TestDisposeAliases(t *testing.T) { { AliasName: nil, KeyID: aws.String("key_id_08"), - KeySpec: types.CustomerMasterKeySpecEccNistP256, + KeySpec: types.KeySpecEccNistP256, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1484,7 +1484,7 @@ func TestDisposeAliases(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/test_example_org/another_server_id/id_09"), KeyID: aws.String("key_id_09"), - KeySpec: types.CustomerMasterKeySpecEccNistP384, + KeySpec: types.KeySpecEccNistP384, Enabled: false, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1532,7 +1532,7 @@ func TestDisposeAliases(t *testing.T) { { AliasName: 
aws.String("alias/SPIRE_SERVER/test_example_org/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee/id_01"), KeyID: aws.String("key_id_01"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1549,7 +1549,7 @@ func TestDisposeAliases(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/test_example_org/another_server/id_01"), KeyID: aws.String("key_id_01"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1566,7 +1566,7 @@ func TestDisposeAliases(t *testing.T) { { AliasName: aws.String("alias/SPIRE_SERVER/test_example_org/another_server/id_01"), KeyID: aws.String("key_id_01"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1654,7 +1654,7 @@ func TestDisposeKeys(t *testing.T) { AliasName: nil, KeyID: aws.String("key_id_01"), Description: aws.String("SPIRE_SERVER_KEY/test_example_org"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1664,7 +1664,7 @@ func TestDisposeKeys(t *testing.T) { AliasName: aws.String("alias/SPIRE_SERVER/test_example_org/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee/id_02"), KeyID: aws.String("key_id_02"), Description: aws.String("SPIRE_SERVER_KEY/test_example_org"), - KeySpec: types.CustomerMasterKeySpecRsa2048, + KeySpec: types.KeySpecRsa2048, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1674,7 +1674,7 @@ func TestDisposeKeys(t *testing.T) { AliasName: aws.String("alias/SPIRE_SERVER/test_example_org/another_server_id/id_03"), KeyID: aws.String("key_id_03"), Description: aws.String("SPIRE_SERVER_KEY/test_example_org"), - KeySpec: types.CustomerMasterKeySpecEccNistP384, + KeySpec: types.KeySpecEccNistP384, Enabled: true, PublicKey: 
[]byte("foo"), CreationDate: &unixEpoch, @@ -1684,7 +1684,7 @@ func TestDisposeKeys(t *testing.T) { AliasName: aws.String("alias/SPIRE_SERVER/another_td/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee/id_04"), KeyID: aws.String("key_id_04"), Description: aws.String("SPIRE_SERVER_KEY/another_td"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1694,7 +1694,7 @@ func TestDisposeKeys(t *testing.T) { AliasName: aws.String("alias/SPIRE_SERVER/another_td/another_server_id/id_05"), KeyID: aws.String("key_id_05"), Description: aws.String("SPIRE_SERVER_KEY/another_td"), - KeySpec: types.CustomerMasterKeySpecEccNistP256, + KeySpec: types.KeySpecEccNistP256, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1704,7 +1704,7 @@ func TestDisposeKeys(t *testing.T) { AliasName: aws.String("alias/SPIRE_SERVER/unrelated"), KeyID: aws.String("key_id_06"), Description: nil, - KeySpec: types.CustomerMasterKeySpecEccNistP256, + KeySpec: types.KeySpecEccNistP256, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1714,7 +1714,7 @@ func TestDisposeKeys(t *testing.T) { AliasName: aws.String("alias/SPIRE_SERVER/unrelated/unrelated/id_07"), KeyID: aws.String("key_id_07"), Description: nil, - KeySpec: types.CustomerMasterKeySpecEccNistP384, + KeySpec: types.KeySpecEccNistP384, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1724,7 +1724,7 @@ func TestDisposeKeys(t *testing.T) { AliasName: nil, KeyID: aws.String("key_id_08"), Description: nil, - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1734,7 +1734,7 @@ func TestDisposeKeys(t *testing.T) { AliasName: aws.String("alias/SPIRE_SERVER/test_example_org/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee/id_01"), KeyID: aws.String("key_id_09"), Description: aws.String("SPIRE_SERVER_KEY/test_example_org"), 
- KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1744,7 +1744,7 @@ func TestDisposeKeys(t *testing.T) { AliasName: nil, KeyID: aws.String("key_id_10"), Description: aws.String("SPIRE_SERVER_KEY/another_td"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1754,7 +1754,7 @@ func TestDisposeKeys(t *testing.T) { AliasName: nil, KeyID: aws.String("key_id_11"), Description: aws.String("SPIRE_SERVER_KEY/"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1764,7 +1764,7 @@ func TestDisposeKeys(t *testing.T) { AliasName: nil, KeyID: aws.String("key_id_12"), Description: aws.String("SPIRE_SERVER_KEY"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1774,7 +1774,7 @@ func TestDisposeKeys(t *testing.T) { AliasName: nil, KeyID: aws.String("key_id_13"), Description: aws.String("test_example_org"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1784,7 +1784,7 @@ func TestDisposeKeys(t *testing.T) { AliasName: nil, KeyID: aws.String("key_id_14"), Description: aws.String("unrelated"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1794,7 +1794,7 @@ func TestDisposeKeys(t *testing.T) { AliasName: nil, KeyID: aws.String("key_id_15"), Description: aws.String("disabled key"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: false, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1804,7 +1804,7 @@ func TestDisposeKeys(t *testing.T) 
{ AliasName: nil, KeyID: aws.String("key_id_16"), Description: aws.String("SPIRE_SERVER_KEY/test_example_org/extra"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1870,7 +1870,7 @@ func TestDisposeKeys(t *testing.T) { AliasName: nil, KeyID: aws.String("key_id_01"), Description: aws.String("SPIRE_SERVER_KEY/test_example_org"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1888,7 +1888,7 @@ func TestDisposeKeys(t *testing.T) { AliasName: nil, KeyID: aws.String("key_id_01"), Description: aws.String("SPIRE_SERVER_KEY/test_example_org"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, @@ -1906,7 +1906,7 @@ func TestDisposeKeys(t *testing.T) { AliasName: nil, KeyID: aws.String("key_id_01"), Description: aws.String("SPIRE_SERVER_KEY/test_example_org"), - KeySpec: types.CustomerMasterKeySpecRsa4096, + KeySpec: types.KeySpecRsa4096, Enabled: true, PublicKey: []byte("foo"), CreationDate: &unixEpoch, diff --git a/pkg/server/plugin/keymanager/awskms/client_fake.go b/pkg/server/plugin/keymanager/awskms/client_fake.go index 8d2cb29e2ac..217f63e2d78 100644 --- a/pkg/server/plugin/keymanager/awskms/client_fake.go +++ b/pkg/server/plugin/keymanager/awskms/client_fake.go @@ -110,17 +110,17 @@ func (k *kmsClientFake) CreateKey(_ context.Context, input *kms.CreateKeyInput, } var privateKey crypto.Signer - switch input.CustomerMasterKeySpec { //nolint:staticcheck // not deprecated in a released version yet - case types.CustomerMasterKeySpecEccNistP256: + switch input.KeySpec { + case types.KeySpecEccNistP256: privateKey = k.testKeys.NewEC256(k.t) - case types.CustomerMasterKeySpecEccNistP384: + case types.KeySpecEccNistP384: privateKey = k.testKeys.NewEC384(k.t) - case 
types.CustomerMasterKeySpecRsa2048: + case types.KeySpecRsa2048: privateKey = k.testKeys.NewRSA2048(k.t) - case types.CustomerMasterKeySpecRsa4096: + case types.KeySpecRsa4096: privateKey = k.testKeys.NewRSA4096(k.t) default: - return nil, fmt.Errorf("unknown key type %q", input.CustomerMasterKeySpec) //nolint:staticcheck // not deprecated in a released version yet + return nil, fmt.Errorf("unknown key type %q", input.KeySpec) } pkixData, err := x509.MarshalPKIXPublicKey(privateKey.Public()) @@ -133,7 +133,7 @@ func (k *kmsClientFake) CreateKey(_ context.Context, input *kms.CreateKeyInput, CreationDate: aws.Time(time.Unix(0, 0)), PublicKey: pkixData, privateKey: privateKey, - KeySpec: input.CustomerMasterKeySpec, //nolint:staticcheck // not deprecated in a released version yet + KeySpec: input.KeySpec, Enabled: true, } @@ -163,12 +163,12 @@ func (k *kmsClientFake) DescribeKey(_ context.Context, input *kms.DescribeKeyInp return &kms.DescribeKeyOutput{ KeyMetadata: &types.KeyMetadata{ - KeyId: keyEntry.KeyID, - Arn: keyEntry.Arn, - CustomerMasterKeySpec: keyEntry.KeySpec, - Enabled: keyEntry.Enabled, - Description: keyEntry.Description, - CreationDate: keyEntry.CreationDate, + KeyId: keyEntry.KeyID, + Arn: keyEntry.Arn, + KeySpec: keyEntry.KeySpec, + Enabled: keyEntry.Enabled, + Description: keyEntry.Description, + CreationDate: keyEntry.CreationDate, }, }, nil } @@ -494,7 +494,7 @@ type fakeKeyEntry struct { PublicKey []byte privateKey crypto.Signer Enabled bool - KeySpec types.CustomerMasterKeySpec + KeySpec types.KeySpec } type fakeAlias struct { diff --git a/pkg/server/plugin/keymanager/awskms/fetcher.go b/pkg/server/plugin/keymanager/awskms/fetcher.go index 6086dac1060..72545f02753 100644 --- a/pkg/server/plugin/keymanager/awskms/fetcher.go +++ b/pkg/server/plugin/keymanager/awskms/fetcher.go @@ -108,9 +108,9 @@ func (kf *keyFetcher) fetchKeyEntryDetails(ctx context.Context, alias types.Alia return nil, status.Errorf(codes.FailedPrecondition, "found disabled 
SPIRE key: %q, alias: %q", *describeResp.KeyMetadata.Arn, *alias.AliasArn) } - keyType, ok := keyTypeFromKeySpec(describeResp.KeyMetadata.CustomerMasterKeySpec) //nolint:staticcheck // not deprecated in a released version yet + keyType, ok := keyTypeFromKeySpec(describeResp.KeyMetadata.KeySpec) if !ok { - return nil, status.Errorf(codes.Internal, "unsupported key spec: %v", describeResp.KeyMetadata.CustomerMasterKeySpec) //nolint:staticcheck // not deprecated in a released version yet + return nil, status.Errorf(codes.Internal, "unsupported key spec: %v", describeResp.KeyMetadata.KeySpec) } publicKeyResp, err := kf.kmsClient.GetPublicKey(ctx, &kms.GetPublicKeyInput{KeyId: alias.AliasArn}) diff --git a/pkg/server/plugin/keymanager/azurekeyvault/azure_key_vault.go b/pkg/server/plugin/keymanager/azurekeyvault/azure_key_vault.go index b9db9cdb4d4..e8325fb3fa9 100644 --- a/pkg/server/plugin/keymanager/azurekeyvault/azure_key_vault.go +++ b/pkg/server/plugin/keymanager/azurekeyvault/azure_key_vault.go @@ -272,7 +272,7 @@ func (p *Plugin) refreshKeys(ctx context.Context) error { } if errs != nil { - return fmt.Errorf(strings.Join(errs, ": ")) + return errors.New(strings.Join(errs, ": ")) } return nil } diff --git a/pkg/server/plugin/keymanager/gcpkms/gcpkms.go b/pkg/server/plugin/keymanager/gcpkms/gcpkms.go index a20231acf14..47b696b1ce5 100644 --- a/pkg/server/plugin/keymanager/gcpkms/gcpkms.go +++ b/pkg/server/plugin/keymanager/gcpkms/gcpkms.go @@ -777,7 +777,7 @@ func (p *Plugin) keepActiveCryptoKeys(ctx context.Context) error { } if errs != nil { - return fmt.Errorf(strings.Join(errs, "; ")) + return errors.New(strings.Join(errs, "; ")) } return nil } diff --git a/pkg/server/plugin/nodeattestor/awsiid/iid.go b/pkg/server/plugin/nodeattestor/awsiid/iid.go index f211808afa2..3df80a0c274 100644 --- a/pkg/server/plugin/nodeattestor/awsiid/iid.go +++ b/pkg/server/plugin/nodeattestor/awsiid/iid.go @@ -436,12 +436,12 @@ func unmarshalAndValidateIdentityDocument(data []byte, 
getAWSCACertificate func( switch publicKeyType { case RSA1024: if err := verifyRSASignature(caCert.PublicKey.(*rsa.PublicKey), attestationData.Document, signature); err != nil { - return imds.InstanceIdentityDocument{}, status.Errorf(codes.InvalidArgument, err.Error()) + return imds.InstanceIdentityDocument{}, status.Error(codes.InvalidArgument, err.Error()) } case RSA2048: pkcs7Sig, err := decodeAndParsePKCS7Signature(signature, caCert) if err != nil { - return imds.InstanceIdentityDocument{}, status.Errorf(codes.InvalidArgument, err.Error()) + return imds.InstanceIdentityDocument{}, status.Error(codes.InvalidArgument, err.Error()) } if err := pkcs7Sig.Verify(); err != nil { diff --git a/pkg/server/plugin/nodeattestor/azuremsi/msi.go b/pkg/server/plugin/nodeattestor/azuremsi/msi.go index 40ede043290..b3b662839f1 100644 --- a/pkg/server/plugin/nodeattestor/azuremsi/msi.go +++ b/pkg/server/plugin/nodeattestor/azuremsi/msi.go @@ -261,7 +261,7 @@ func (p *MSIAttestorPlugin) Configure(_ context.Context, req *configv1.Configure td, err := spiffeid.TrustDomainFromString(req.CoreConfiguration.TrustDomain) if err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) + return nil, status.Error(codes.InvalidArgument, err.Error()) } tenants := make(map[string]*tenantConfig) diff --git a/pkg/server/plugin/nodeattestor/httpchallenge/httpchallenge.go b/pkg/server/plugin/nodeattestor/httpchallenge/httpchallenge.go new file mode 100644 index 00000000000..c4ba141a42a --- /dev/null +++ b/pkg/server/plugin/nodeattestor/httpchallenge/httpchallenge.go @@ -0,0 +1,288 @@ +package httpchallenge + +import ( + "context" + "encoding/json" + "net/http" + "regexp" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/hcl" + "github.com/spiffe/go-spiffe/v2/spiffeid" + nodeattestorv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/server/nodeattestor/v1" + configv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/service/common/config/v1" + 
"github.com/spiffe/spire/pkg/common/catalog" + "github.com/spiffe/spire/pkg/common/plugin/httpchallenge" + nodeattestorbase "github.com/spiffe/spire/pkg/server/plugin/nodeattestor/base" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + pluginName = "http_challenge" +) + +var ( + agentNamePattern = regexp.MustCompile("^[a-zA-z]+[a-zA-Z0-9-]$") +) + +func BuiltIn() catalog.BuiltIn { + return builtin(New()) +} + +func BuiltInTesting(client *http.Client, forceNonce string) catalog.BuiltIn { + plugin := New() + plugin.client = client + plugin.forceNonce = forceNonce + return builtin(plugin) +} + +func builtin(p *Plugin) catalog.BuiltIn { + return catalog.MakeBuiltIn(pluginName, + nodeattestorv1.NodeAttestorPluginServer(p), + configv1.ConfigServiceServer(p), + ) +} + +type configuration struct { + trustDomain spiffeid.TrustDomain + requiredPort *int + allowNonRootPorts bool + dnsPatterns []*regexp.Regexp + tofu bool +} + +type Config struct { + AllowedDNSPatterns []string `hcl:"allowed_dns_patterns"` + RequiredPort *int `hcl:"required_port"` + AllowNonRootPorts *bool `hcl:"allow_non_root_ports"` + TOFU *bool `hcl:"tofu"` +} + +type Plugin struct { + nodeattestorbase.Base + nodeattestorv1.UnsafeNodeAttestorServer + configv1.UnsafeConfigServer + + m sync.Mutex + config *configuration + + log hclog.Logger + + client *http.Client + forceNonce string +} + +func New() *Plugin { + return &Plugin{ + client: http.DefaultClient, + } +} + +func (p *Plugin) Attest(stream nodeattestorv1.NodeAttestor_AttestServer) error { + req, err := stream.Recv() + if err != nil { + return err + } + + config, err := p.getConfig() + if err != nil { + return err + } + + payload := req.GetPayload() + if payload == nil { + return status.Error(codes.InvalidArgument, "missing attestation payload") + } + + attestationData := new(httpchallenge.AttestationData) + if err := json.Unmarshal(payload, attestationData); err != nil { + return status.Errorf(codes.InvalidArgument, 
"failed to unmarshal data: %v", err) + } + + if config.requiredPort != nil && attestationData.Port != *config.requiredPort { + return status.Errorf(codes.InvalidArgument, "port %d is not allowed to be used by this server", attestationData.Port) + } + if (!config.allowNonRootPorts) && attestationData.Port >= 1024 { + return status.Errorf(codes.InvalidArgument, "port %d is not allowed to be >= 1024", attestationData.Port) + } + + if err = validateAgentName(attestationData.AgentName); err != nil { + return err + } + + if err = validateHostName(attestationData.HostName, config.dnsPatterns); err != nil { + return err + } + + challenge, err := httpchallenge.GenerateChallenge(p.forceNonce) + if err != nil { + return status.Errorf(codes.Internal, "unable to generate challenge: %v", err) + } + + challengeBytes, err := json.Marshal(challenge) + if err != nil { + return status.Errorf(codes.Internal, "unable to marshal challenge: %v", err) + } + + if err := stream.Send(&nodeattestorv1.AttestResponse{ + Response: &nodeattestorv1.AttestResponse_Challenge{ + Challenge: challengeBytes, + }, + }); err != nil { + return err + } + + // receive the response. We dont really care what it is but the plugin system requiries it. 
+ _, err = stream.Recv() + if err != nil { + return err + } + + p.log.Debug("Verifying challenge") + + timeoutctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + if err := httpchallenge.VerifyChallenge(timeoutctx, p.client, attestationData, challenge); err != nil { + return status.Errorf(codes.PermissionDenied, "challenge verification failed: %v", err) + } + + spiffeid, err := httpchallenge.MakeAgentID(config.trustDomain, attestationData.HostName) + if err != nil { + return status.Errorf(codes.Internal, "failed to make spiffe id: %v", err) + } + + if config.tofu { + if err := p.AssessTOFU(stream.Context(), spiffeid.String(), p.log); err != nil { + return err + } + } + + return stream.Send(&nodeattestorv1.AttestResponse{ + Response: &nodeattestorv1.AttestResponse_AgentAttributes{ + AgentAttributes: &nodeattestorv1.AgentAttributes{ + SpiffeId: spiffeid.String(), + SelectorValues: buildSelectorValues(attestationData.HostName), + CanReattest: !config.tofu, + }, + }, + }) +} + +func (p *Plugin) Configure(_ context.Context, req *configv1.ConfigureRequest) (*configv1.ConfigureResponse, error) { + hclConfig := new(Config) + if err := hcl.Decode(hclConfig, req.HclConfiguration); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "unable to decode configuration: %v", err) + } + + if req.CoreConfiguration == nil { + return nil, status.Error(codes.InvalidArgument, "core configuration is required") + } + + if req.CoreConfiguration.TrustDomain == "" { + return nil, status.Error(codes.InvalidArgument, "trust_domain is required") + } + + trustDomain, err := spiffeid.TrustDomainFromString(req.CoreConfiguration.TrustDomain) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "trust_domain is invalid: %v", err) + } + + var dnsPatterns []*regexp.Regexp + for _, r := range hclConfig.AllowedDNSPatterns { + re := regexp.MustCompile(r) + dnsPatterns = append(dnsPatterns, re) + } + + allowNonRootPorts := true + if 
hclConfig.AllowNonRootPorts != nil { + allowNonRootPorts = *hclConfig.AllowNonRootPorts + } + + tofu := true + if hclConfig.TOFU != nil { + tofu = *hclConfig.TOFU + } + + mustUseTOFU := false + switch { + // User has explicitly asked for a required port that is untrusted + case hclConfig.RequiredPort != nil && *hclConfig.RequiredPort >= 1024: + mustUseTOFU = true + // User has just chosen the defaults, any port is allowed + case hclConfig.AllowNonRootPorts == nil && hclConfig.RequiredPort == nil: + mustUseTOFU = true + // User explicitly set AllowNonRootPorts to true and no required port specified + case hclConfig.AllowNonRootPorts != nil && *hclConfig.AllowNonRootPorts && hclConfig.RequiredPort == nil: + mustUseTOFU = true + } + + if !tofu && mustUseTOFU { + return nil, status.Errorf(codes.InvalidArgument, "you can not turn off trust on first use (TOFU) when non-root ports are allowed") + } + + p.setConfiguration(&configuration{ + trustDomain: trustDomain, + dnsPatterns: dnsPatterns, + requiredPort: hclConfig.RequiredPort, + allowNonRootPorts: allowNonRootPorts, + tofu: tofu, + }) + + return &configv1.ConfigureResponse{}, nil +} + +// SetLogger sets this plugin's logger +func (p *Plugin) SetLogger(log hclog.Logger) { + p.log = log +} + +func (p *Plugin) getConfig() (*configuration, error) { + p.m.Lock() + defer p.m.Unlock() + if p.config == nil { + return nil, status.Errorf(codes.FailedPrecondition, "not configured") + } + return p.config, nil +} + +func (p *Plugin) setConfiguration(config *configuration) { + p.m.Lock() + defer p.m.Unlock() + p.config = config +} + +func buildSelectorValues(hostName string) []string { + var selectorValues []string + + selectorValues = append(selectorValues, "hostname:"+hostName) + + return selectorValues +} + +func validateAgentName(agentName string) error { + l := agentNamePattern.FindAllStringSubmatch(agentName, -1) + if len(l) != 1 || len(l[0]) == 0 || len(l[0]) > 32 { + return status.Error(codes.InvalidArgument, "agent name is 
not valid") + } + return nil +} + +func validateHostName(hostName string, dnsPatterns []*regexp.Regexp) error { + if hostName == "localhost" { + return status.Errorf(codes.PermissionDenied, "you can not use localhost as a hostname") + } + if len(dnsPatterns) == 0 { + return nil + } + for _, re := range dnsPatterns { + l := re.FindAllStringSubmatch(hostName, -1) + if len(l) > 0 { + return nil + } + } + return status.Errorf(codes.PermissionDenied, "the requested hostname is not allowed to connect") +} diff --git a/pkg/server/plugin/nodeattestor/httpchallenge/httpchallenge_test.go b/pkg/server/plugin/nodeattestor/httpchallenge/httpchallenge_test.go new file mode 100644 index 00000000000..cdde03f8e98 --- /dev/null +++ b/pkg/server/plugin/nodeattestor/httpchallenge/httpchallenge_test.go @@ -0,0 +1,433 @@ +package httpchallenge_test + +import ( + "context" + "encoding/json" + "fmt" + "net" + "net/http" + "net/http/httptest" + neturl "net/url" + "testing" + + "github.com/spiffe/go-spiffe/v2/spiffeid" + agentstorev1 "github.com/spiffe/spire-plugin-sdk/proto/spire/hostservice/server/agentstore/v1" + configv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/service/common/config/v1" + "github.com/spiffe/spire/pkg/common/catalog" + common_httpchallenge "github.com/spiffe/spire/pkg/common/plugin/httpchallenge" + "github.com/spiffe/spire/pkg/server/plugin/nodeattestor" + "github.com/spiffe/spire/pkg/server/plugin/nodeattestor/httpchallenge" + "github.com/spiffe/spire/proto/spire/common" + "github.com/spiffe/spire/test/fakes/fakeagentstore" + "github.com/spiffe/spire/test/plugintest" + "github.com/stretchr/testify/require" +) + +func TestConfigure(t *testing.T) { + tests := []struct { + name string + hclConf string + coreConf *configv1.CoreConfiguration + expErr string + }{ + { + name: "Configure fails if core config is not provided", + expErr: "rpc error: code = InvalidArgument desc = core configuration is required", + }, + { + name: "Configure fails if trust domain is empty", + 
expErr: "rpc error: code = InvalidArgument desc = trust_domain is required", + coreConf: &configv1.CoreConfiguration{}, + }, + { + name: "Configure fails if HCL config cannot be decoded", + expErr: "rpc error: code = InvalidArgument desc = unable to decode configuration", + coreConf: &configv1.CoreConfiguration{TrustDomain: "example.org"}, + hclConf: "not an HCL configuration", + }, + { + name: "Configure fails if tofu and allow_non_root_ports", + expErr: "rpc error: code = InvalidArgument desc = you can not turn off trust on first use (TOFU) when non-root ports are allowed", + coreConf: &configv1.CoreConfiguration{TrustDomain: "example.org"}, + hclConf: "tofu = false\nallow_non_root_ports = true", + }, + { + name: "Configure fails if tofu and required port >= 1024", + expErr: "rpc error: code = InvalidArgument desc = you can not turn off trust on first use (TOFU) when non-root ports are allowed", + coreConf: &configv1.CoreConfiguration{TrustDomain: "example.org"}, + hclConf: "tofu = false\nrequired_port = 1024", + }, + { + name: "Configure fails if tofu and no other args", + expErr: "rpc error: code = InvalidArgument desc = you can not turn off trust on first use (TOFU) when non-root ports are allowed", + coreConf: &configv1.CoreConfiguration{TrustDomain: "example.org"}, + hclConf: "tofu = false", + }, + { + name: "Configure fails if tofu and allow root ports is true", + expErr: "rpc error: code = InvalidArgument desc = you can not turn off trust on first use (TOFU) when non-root ports are allowed", + coreConf: &configv1.CoreConfiguration{TrustDomain: "example.org"}, + hclConf: "tofu = false\nallow_non_root_ports = true", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + plugin := httpchallenge.New() + resp, err := plugin.Configure(context.Background(), &configv1.ConfigureRequest{ + HclConfiguration: tt.hclConf, + CoreConfiguration: tt.coreConf, + }) + if tt.expErr != "" { + require.Contains(t, err.Error(), tt.expErr) + 
require.Nil(t, resp) + return + } + require.NoError(t, err) + require.NotNil(t, resp) + }) + } +} + +func TestAttestFailures(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/.well-known/spiffe/nodeattestor/http_challenge/default/challenge" { + t.Errorf("Expected to request '/.well-known/spiffe/nodeattestor/http_challenge/default/challenge', got: %s", r.URL.Path) + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`123456789abcdefghijklmnopqrstuvwxyz`)) + })) + defer server.Close() + + client := newClientWithLocalIntercept(server.URL) + + challengeFnNil := func(ctx context.Context, challenge []byte) ([]byte, error) { + return nil, nil + } + + tests := []struct { + name string + hclConf string + expErr string + payload []byte + challengeFn func(ctx context.Context, challenge []byte) ([]byte, error) + tofu bool + }{ + { + name: "Attest fails if payload doesnt exist", + expErr: "rpc error: code = InvalidArgument desc = payload cannot be empty", + hclConf: "", + tofu: true, + challengeFn: challengeFnNil, + payload: nil, + }, + { + name: "Attest fails if payload cannot be unmarshalled", + expErr: "rpc error: code = InvalidArgument desc = nodeattestor(http_challenge): failed to unmarshal data: invalid character 'o' in literal null (expecting 'u')", + hclConf: "", + tofu: true, + challengeFn: challengeFnNil, + payload: []byte("not a payload"), + }, + { + name: "Attest fails if hostname is blank", + expErr: "rpc error: code = PermissionDenied desc = nodeattestor(http_challenge): challenge verification failed: hostname must be set", + hclConf: "", + tofu: true, + challengeFn: challengeFnNil, + payload: marshalPayload(t, &common_httpchallenge.AttestationData{ + HostName: "", + AgentName: "default", + Port: 80, + }), + }, + { + name: "Attest fails if agentname is blank", + expErr: "rpc error: code = InvalidArgument desc = nodeattestor(http_challenge): agent name is not valid", + hclConf: 
"", + tofu: true, + challengeFn: challengeFnNil, + payload: marshalPayload(t, &common_httpchallenge.AttestationData{ + HostName: "foo", + AgentName: "", + Port: 80, + }), + }, + { + name: "Attest fails if hostname is localhost", + expErr: "rpc error: code = PermissionDenied desc = nodeattestor(http_challenge): you can not use localhost as a hostname", + hclConf: "", + tofu: true, + challengeFn: challengeFnNil, + payload: marshalPayload(t, &common_httpchallenge.AttestationData{ + HostName: "localhost", + AgentName: "default", + Port: 80, + }), + }, + { + name: "Attest fails if port is 0", + expErr: "port is invalid", + hclConf: "", + tofu: true, + challengeFn: challengeFnNil, + payload: marshalPayload(t, &common_httpchallenge.AttestationData{ + HostName: "foo", + AgentName: "default", + Port: 0, + }), + }, + { + name: "Attest fails if port is negative", + expErr: "port is invalid", + hclConf: "", + tofu: true, + challengeFn: challengeFnNil, + payload: marshalPayload(t, &common_httpchallenge.AttestationData{ + HostName: "foo", + AgentName: "default", + Port: -1, + }), + }, + { + name: "Attest fails if hostname has a slash", + expErr: "rpc error: code = PermissionDenied desc = nodeattestor(http_challenge): challenge verification failed: hostname can not contain a slash", + hclConf: "", + tofu: true, + challengeFn: challengeFnNil, + payload: marshalPayload(t, &common_httpchallenge.AttestationData{ + HostName: "fo/o", + AgentName: "default", + Port: 80, + }), + }, + { + name: "Attest fails if hostname has a colon", + expErr: "rpc error: code = PermissionDenied desc = nodeattestor(http_challenge): challenge verification failed: hostname can not contain a colon", + hclConf: "", + tofu: true, + challengeFn: challengeFnNil, + payload: marshalPayload(t, &common_httpchallenge.AttestationData{ + HostName: "foo:1", + AgentName: "default", + Port: 80, + }), + }, + { + name: "Attest fails if agentname has a dot", + expErr: "rpc error: code = InvalidArgument desc = 
nodeattestor(http_challenge): agent name is not valid", + hclConf: "", + tofu: true, + challengeFn: challengeFnNil, + payload: marshalPayload(t, &common_httpchallenge.AttestationData{ + HostName: "foo", + AgentName: "def.ault", + Port: 80, + }), + }, + { + name: "Attest fails if required port is different from given one", + expErr: "rpc error: code = InvalidArgument desc = nodeattestor(http_challenge): port 81 is not allowed to be used by this server", + hclConf: "required_port = 80", + tofu: true, + challengeFn: challengeFnNil, + payload: marshalPayload(t, &common_httpchallenge.AttestationData{ + HostName: "foo", + AgentName: "default", + Port: 81, + }), + }, + { + name: "Attest fails if non root ports are disallowed and port is >= 1024", + expErr: "rpc error: code = InvalidArgument desc = nodeattestor(http_challenge): port 1024 is not allowed to be >= 1024", + hclConf: "allow_non_root_ports = false", + tofu: true, + challengeFn: challengeFnNil, + payload: marshalPayload(t, &common_httpchallenge.AttestationData{ + HostName: "foo", + AgentName: "default", + Port: 1024, + }), + }, + { + name: "Attest fails if hostname is not valid by dns pattern", + expErr: "rpc error: code = PermissionDenied desc = nodeattestor(http_challenge): the requested hostname is not allowed to connect", + hclConf: `allowed_dns_patterns = ["p[0-9][.]example[.]com"]`, + tofu: true, + challengeFn: challengeFnNil, + payload: marshalPayload(t, &common_httpchallenge.AttestationData{ + HostName: "foo", + AgentName: "default", + Port: 80, + }), + }, + { + name: "Attest fails if nonce does not match", + expErr: "rpc error: code = PermissionDenied desc = nodeattestor(http_challenge): challenge verification failed: expected nonce \"bad123456789abcdefghijklmnopqrstuvwxyz\" but got \"123456789abcdefghijklmnopqrstuvwxyz\"", + hclConf: "", + tofu: true, + challengeFn: challengeFnNil, + payload: marshalPayload(t, &common_httpchallenge.AttestationData{ + HostName: "foo", + AgentName: "default", + Port: 80, 
+ }), + }, + { + name: "Attest fails when reattesting with tofu", + expErr: "rpc error: code = PermissionDenied desc = nodeattestor(http_challenge): attestation data has already been used to attest an agent", + hclConf: "", + tofu: false, + challengeFn: challengeFnNil, + payload: marshalPayload(t, &common_httpchallenge.AttestationData{ + HostName: "foo", + AgentName: "default", + Port: 80, + }), + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + var testNonce string + if tt.tofu { + testNonce = "bad123456789abcdefghijklmnopqrstuvwxyz" + } else { + testNonce = "123456789abcdefghijklmnopqrstuvwxyz" + } + plugin := loadPlugin(t, tt.hclConf, !tt.tofu, client, testNonce) + result, err := plugin.Attest(context.Background(), tt.payload, tt.challengeFn) + require.Contains(t, err.Error(), tt.expErr) + require.Nil(t, result) + }) + } +} + +func TestAttestSucceeds(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/.well-known/spiffe/nodeattestor/http_challenge/default/challenge" { + t.Errorf("Expected to request '/.well-known/spiffe/nodeattestor/http_challenge/default/challenge', got: %s", r.URL.Path) + } + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(`123456789abcdefghijklmnopqrstuvwxyz`)) + })) + defer server.Close() + + client := newClientWithLocalIntercept(server.URL) + + challengeFnNil := func(ctx context.Context, challenge []byte) ([]byte, error) { + return nil, nil + } + + tests := []struct { + name string + hclConf string + payload []byte + challengeFn func(ctx context.Context, challenge []byte) ([]byte, error) + expectedAgentID string + expectedSelectors []*common.Selector + tofu bool + }{ + { + name: "Attest succeeds for defaults", + hclConf: "", + tofu: true, + challengeFn: challengeFnNil, + payload: marshalPayload(t, &common_httpchallenge.AttestationData{ + HostName: "foo", + AgentName: "default", + Port: 80, + }), + expectedAgentID: 
"spiffe://example.org/spire/agent/http_challenge/foo", + expectedSelectors: []*common.Selector{ + { + Type: "http_challenge", + Value: "hostname:foo", + }, + }, + }, + { + name: "Attest succeeds for reattest without tofu", + hclConf: "tofu = false\nallow_non_root_ports = false", + tofu: false, + challengeFn: challengeFnNil, + payload: marshalPayload(t, &common_httpchallenge.AttestationData{ + HostName: "foo", + AgentName: "default", + Port: 80, + }), + expectedAgentID: "spiffe://example.org/spire/agent/http_challenge/foo", + expectedSelectors: []*common.Selector{ + { + Type: "http_challenge", + Value: "hostname:foo", + }, + }, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + testNonce := "123456789abcdefghijklmnopqrstuvwxyz" + plugin := loadPlugin(t, tt.hclConf, !tt.tofu, client, testNonce) + result, err := plugin.Attest(context.Background(), tt.payload, tt.challengeFn) + require.NoError(t, err) + require.NotNil(t, result) + + require.Equal(t, tt.expectedAgentID, result.AgentID) + requireSelectorsMatch(t, tt.expectedSelectors, result.Selectors) + }) + } +} + +func loadPlugin(t *testing.T, config string, testTOFU bool, client *http.Client, testNonce string) nodeattestor.NodeAttestor { + v1 := new(nodeattestor.V1) + agentStore := fakeagentstore.New() + var configureErr error + if testTOFU { + agentStore.SetAgentInfo(&agentstorev1.AgentInfo{ + AgentId: "spiffe://example.org/spire/agent/http_challenge/foo", + }) + } + opts := []plugintest.Option{ + plugintest.Configure(config), + plugintest.CaptureConfigureError(&configureErr), + plugintest.HostServices(agentstorev1.AgentStoreServiceServer(agentStore)), + plugintest.CoreConfig(catalog.CoreConfig{ + TrustDomain: spiffeid.RequireTrustDomainFromString("example.org"), + }), + } + plugintest.Load(t, httpchallenge.BuiltInTesting(client, testNonce), v1, opts...) 
+ return v1 +} + +func marshalPayload(t *testing.T, attReq *common_httpchallenge.AttestationData) []byte { + attReqBytes, err := json.Marshal(attReq) + require.NoError(t, err) + return attReqBytes +} + +func requireSelectorsMatch(t *testing.T, expected []*common.Selector, actual []*common.Selector) { + require.Equal(t, len(expected), len(actual)) + for idx, expSel := range expected { + require.Equal(t, expSel.Type, actual[idx].Type) + require.Equal(t, expSel.Value, actual[idx].Value) + } +} + +func newClientWithLocalIntercept(url string) *http.Client { + u, _ := neturl.Parse(url) + _, port, _ := net.SplitHostPort(u.Host) + return &http.Client{ + Transport: &http.Transport{ + DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { + defaultDialContext := http.DefaultTransport.(*http.Transport).DialContext + if addr == "foo:80" { + addr = fmt.Sprintf("127.0.0.1:%s", port) + } + return defaultDialContext(ctx, network, addr) + }, + }, + } +} diff --git a/pkg/server/plugin/upstreamauthority/awspca/pca.go b/pkg/server/plugin/upstreamauthority/awspca/pca.go index 6973a37d996..e3c596c190a 100644 --- a/pkg/server/plugin/upstreamauthority/awspca/pca.go +++ b/pkg/server/plugin/upstreamauthority/awspca/pca.go @@ -263,13 +263,13 @@ func (p *PCAPlugin) MintX509CAAndSubscribe(request *upstreamauthorityv1.MintX509 upstreamRoot := certChain[len(certChain)-1] bundle := x509util.DedupeCertificates([]*x509.Certificate{upstreamRoot}, config.supplementalBundle) - upstreamX509Roots, err := x509certificate.ToPluginProtos(bundle) + upstreamX509Roots, err := x509certificate.ToPluginFromCertificates(bundle) if err != nil { return status.Errorf(codes.Internal, "unable to form response upstream X.509 roots: %v", err) } // All else comprises the chain (including the issued certificate) - x509CAChain, err := x509certificate.ToPluginProtos(append([]*x509.Certificate{cert}, certChain[:len(certChain)-1]...)) + x509CAChain, err := 
x509certificate.ToPluginFromCertificates(append([]*x509.Certificate{cert}, certChain[:len(certChain)-1]...)) if err != nil { return status.Errorf(codes.Internal, "unable to form response X.509 CA chain: %v", err) } diff --git a/pkg/server/plugin/upstreamauthority/awspca/pca_test.go b/pkg/server/plugin/upstreamauthority/awspca/pca_test.go index f47f1ee5228..ffea2f64a9a 100644 --- a/pkg/server/plugin/upstreamauthority/awspca/pca_test.go +++ b/pkg/server/plugin/upstreamauthority/awspca/pca_test.go @@ -15,6 +15,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/acmpca" acmpcatypes "github.com/aws/aws-sdk-go-v2/service/acmpca/types" + "github.com/spiffe/spire/pkg/common/coretypes/x509certificate" "github.com/spiffe/spire/pkg/common/pemutil" "github.com/spiffe/spire/pkg/server/plugin/upstreamauthority" "github.com/spiffe/spire/proto/spire/common" @@ -292,16 +293,20 @@ func TestMintX509CA(t *testing.T) { getCertificateErr error expectMsgPrefix string expectX509CA []*x509.Certificate - expectX509Authorities []*x509.Certificate + expectX509Authorities []*x509certificate.X509Authority expectTTL time.Duration }{ { - test: "Successful mint", - config: successConfig, - csr: makeCSR("spiffe://example.com/foo"), - preferredTTL: 300 * time.Second, - expectX509CA: []*x509.Certificate{expectCert, intermediateCert}, - expectX509Authorities: []*x509.Certificate{bundleCert}, + test: "Successful mint", + config: successConfig, + csr: makeCSR("spiffe://example.com/foo"), + preferredTTL: 300 * time.Second, + expectX509CA: []*x509.Certificate{expectCert, intermediateCert}, + expectX509Authorities: []*x509certificate.X509Authority{ + { + Certificate: bundleCert, + }, + }, getCertificateCert: encodedCert.String(), getCertificateCertChain: encodedCertChain.String(), }, @@ -315,10 +320,17 @@ func TestMintX509CA(t *testing.T) { AssumeRoleARN: validAssumeRoleARN, SupplementalBundlePath: supplementalBundlePath, }, - csr: makeCSR("spiffe://example.com/foo"), - 
preferredTTL: 300 * time.Second, - expectX509CA: []*x509.Certificate{expectCert, intermediateCert}, - expectX509Authorities: []*x509.Certificate{bundleCert, supplementalCert[0]}, + csr: makeCSR("spiffe://example.com/foo"), + preferredTTL: 300 * time.Second, + expectX509CA: []*x509.Certificate{expectCert, intermediateCert}, + expectX509Authorities: []*x509certificate.X509Authority{ + { + Certificate: bundleCert, + }, + { + Certificate: supplementalCert[0], + }, + }, getCertificateCert: encodedCert.String(), getCertificateCertChain: encodedCertChain.String(), }, @@ -354,7 +366,7 @@ func TestMintX509CA(t *testing.T) { config: successConfig, csr: makeCSR("spiffe://example.com/foo"), preferredTTL: 300 * time.Second, - getCertificateCert: "no a certificate", + getCertificateCert: "not a certificate", getCertificateCertChain: encodedCertChain.String(), expectCode: codes.Internal, expectMsgPrefix: "upstreamauthority(aws_pca): failed to parse certificate from response: no PEM blocks", @@ -365,7 +377,7 @@ func TestMintX509CA(t *testing.T) { csr: makeCSR("spiffe://example.com/foo"), preferredTTL: 300 * time.Second, getCertificateCert: encodedCert.String(), - getCertificateCertChain: "no a cert chain", + getCertificateCertChain: "not a cert chain", expectCode: codes.Internal, expectMsgPrefix: "upstreamauthority(aws_pca): failed to parse certificate chain from response: no PEM blocks", }, @@ -408,7 +420,7 @@ func TestMintX509CA(t *testing.T) { } assert.Equal(t, tt.expectX509CA, x509CA, "unexpected X509CA") - assert.Equal(t, tt.expectX509Authorities, x509Authorities, "unexected authorities") + assert.Equal(t, tt.expectX509Authorities, x509Authorities, "unexpected authorities") // Plugin does not support streaming back changes so assert the // stream returns EOF. 
diff --git a/pkg/server/plugin/upstreamauthority/awssecret/awssecret.go b/pkg/server/plugin/upstreamauthority/awssecret/awssecret.go index fcc1b72847e..300e90b86f8 100644 --- a/pkg/server/plugin/upstreamauthority/awssecret/awssecret.go +++ b/pkg/server/plugin/upstreamauthority/awssecret/awssecret.go @@ -138,12 +138,12 @@ func (p *Plugin) MintX509CAAndSubscribe(request *upstreamauthorityv1.MintX509CAR return status.Errorf(codes.Internal, "unable to sign CSR: %v", err) } - x509CAChain, err := x509certificate.ToPluginProtos(append([]*x509.Certificate{cert}, p.upstreamCerts...)) + x509CAChain, err := x509certificate.ToPluginFromCertificates(append([]*x509.Certificate{cert}, p.upstreamCerts...)) if err != nil { return status.Errorf(codes.Internal, "unable to form response X.509 CA chain: %v", err) } - upstreamX509Roots, err := x509certificate.ToPluginProtos(p.bundleCerts) + upstreamX509Roots, err := x509certificate.ToPluginFromCertificates(p.bundleCerts) if err != nil { return status.Errorf(codes.Internal, "unable to form response upstream X.509 roots: %v", err) } diff --git a/pkg/server/plugin/upstreamauthority/awssecret/awssecret_test.go b/pkg/server/plugin/upstreamauthority/awssecret/awssecret_test.go index 437e2dcbfcd..4f426b8f709 100644 --- a/pkg/server/plugin/upstreamauthority/awssecret/awssecret_test.go +++ b/pkg/server/plugin/upstreamauthority/awssecret/awssecret_test.go @@ -10,6 +10,7 @@ import ( "github.com/spiffe/go-spiffe/v2/spiffeid" "github.com/spiffe/spire/pkg/common/catalog" + "github.com/spiffe/spire/pkg/common/coretypes/x509certificate" "github.com/spiffe/spire/pkg/common/cryptoutil" "github.com/spiffe/spire/pkg/common/x509svid" "github.com/spiffe/spire/pkg/server/plugin/upstreamauthority" @@ -242,7 +243,9 @@ func TestMintX509CA(t *testing.T) { clk := clock.NewMock(t) certsAndKeys, fakeStorageClientCreator := generateTestData(t, clk) - x509Authority := []*x509.Certificate{certsAndKeys.rootCert} + x509Authority := []*x509certificate.X509Authority{ + 
{Certificate: certsAndKeys.rootCert}, + } makeCSR := func(spiffeID string) []byte { csr, err := util.NewCSRTemplateWithKey(spiffeID, key) @@ -279,7 +282,7 @@ func TestMintX509CA(t *testing.T) { expectCode codes.Code expectMsgPrefix string expectX509CASpiffeID string - expectedX509Authorities []*x509.Certificate + expectedX509Authorities []*x509certificate.X509Authority expectTTL time.Duration numExpectedCAs int }{ diff --git a/pkg/server/plugin/upstreamauthority/certmanager/certmanager.go b/pkg/server/plugin/upstreamauthority/certmanager/certmanager.go index 25c0cd7dd83..7b565f4ab53 100644 --- a/pkg/server/plugin/upstreamauthority/certmanager/certmanager.go +++ b/pkg/server/plugin/upstreamauthority/certmanager/certmanager.go @@ -208,12 +208,12 @@ func (p *Plugin) MintX509CAAndSubscribe(request *upstreamauthorityv1.MintX509CAR return status.Errorf(codes.Internal, "failed to parse CA certificate: %v", err) } - x509CAChain, err := x509certificate.ToPluginProtos(caChain) + x509CAChain, err := x509certificate.ToPluginFromCertificates(caChain) if err != nil { return status.Errorf(codes.Internal, "unable to form response X.509 CA chain: %v", err) } - upstreamX509Roots, err := x509certificate.ToPluginProtos(upstreamRoot) + upstreamX509Roots, err := x509certificate.ToPluginFromCertificates(upstreamRoot) if err != nil { return status.Errorf(codes.Internal, "unable to form response upstream X.509 roots: %v", err) } diff --git a/pkg/server/plugin/upstreamauthority/certmanager/certmanager_test.go b/pkg/server/plugin/upstreamauthority/certmanager/certmanager_test.go index c722bab2e90..d4115600d8a 100644 --- a/pkg/server/plugin/upstreamauthority/certmanager/certmanager_test.go +++ b/pkg/server/plugin/upstreamauthority/certmanager/certmanager_test.go @@ -12,6 +12,7 @@ import ( "github.com/spiffe/go-spiffe/v2/spiffeid" "github.com/spiffe/spire/pkg/common/catalog" + "github.com/spiffe/spire/pkg/common/coretypes/x509certificate" 
"github.com/spiffe/spire/pkg/server/plugin/upstreamauthority" cmapi "github.com/spiffe/spire/pkg/server/plugin/upstreamauthority/certmanager/internal/v1" "github.com/spiffe/spire/proto/spire/common" @@ -58,7 +59,7 @@ func Test_MintX509CA(t *testing.T) { preferredTTL time.Duration updateCR func(t *testing.T, cr *cmapi.CertificateRequest) expectX509CA []*x509.Certificate - expectX509Authorities []*x509.Certificate + expectX509Authorities []*x509certificate.X509Authority expectCode codes.Code expectMsgPrefix string }{ @@ -120,8 +121,10 @@ func Test_MintX509CA(t *testing.T) { cr.Status.Certificate = intermediatePEM cr.Status.CA = rootPEM }, - expectX509CA: []*x509.Certificate{intermediate}, - expectX509Authorities: []*x509.Certificate{root}, + expectX509CA: []*x509.Certificate{intermediate}, + expectX509Authorities: []*x509certificate.X509Authority{ + {Certificate: root}, + }, }, } diff --git a/pkg/server/plugin/upstreamauthority/disk/disk.go b/pkg/server/plugin/upstreamauthority/disk/disk.go index a53d7f1f94b..0820d8694d0 100644 --- a/pkg/server/plugin/upstreamauthority/disk/disk.go +++ b/pkg/server/plugin/upstreamauthority/disk/disk.go @@ -123,12 +123,12 @@ func (p *Plugin) MintX509CAAndSubscribe(request *upstreamauthorityv1.MintX509CAR return status.Errorf(codes.Internal, "unable to sign CSR: %v", err) } - x509CAChain, err := x509certificate.ToPluginProtos(append([]*x509.Certificate{cert}, upstreamCerts.certChain...)) + x509CAChain, err := x509certificate.ToPluginFromCertificates(append([]*x509.Certificate{cert}, upstreamCerts.certChain...)) if err != nil { return status.Errorf(codes.Internal, "unable to form response X.509 CA chain: %v", err) } - upstreamX509Roots, err := x509certificate.ToPluginProtos(upstreamCerts.trustBundle) + upstreamX509Roots, err := x509certificate.ToPluginFromCertificates(upstreamCerts.trustBundle) if err != nil { return status.Errorf(codes.Internal, "unable to form response upstream X.509 roots: %v", err) } diff --git 
a/pkg/server/plugin/upstreamauthority/disk/disk_test.go b/pkg/server/plugin/upstreamauthority/disk/disk_test.go index 6d87a00eb49..4b854b22a54 100644 --- a/pkg/server/plugin/upstreamauthority/disk/disk_test.go +++ b/pkg/server/plugin/upstreamauthority/disk/disk_test.go @@ -18,6 +18,7 @@ import ( "github.com/spiffe/go-spiffe/v2/spiffeid" "github.com/spiffe/spire/pkg/common/catalog" + "github.com/spiffe/spire/pkg/common/coretypes/x509certificate" "github.com/spiffe/spire/pkg/common/cryptoutil" "github.com/spiffe/spire/pkg/common/pemutil" "github.com/spiffe/spire/pkg/common/x509svid" @@ -163,7 +164,7 @@ func TestMintX509CA(t *testing.T) { assert.Equal(t, tt.expectTTL, ttl, "TTL does not match") } assert.Equal(t, tt.expectX509CA, certChainURIs(x509CA)) - assert.Equal(t, tt.expectedX509Authorities, certChainURIs(x509Authorities)) + assert.Equal(t, tt.expectedX509Authorities, authChainURIs(x509Authorities)) // Plugin does not support streaming back changes so assert the // stream returns EOF. 
@@ -346,6 +347,14 @@ func certChainURIs(chain []*x509.Certificate) []string { return uris } +func authChainURIs(chain []*x509certificate.X509Authority) []string { + var uris []string + for _, authority := range chain { + uris = append(uris, certURI(authority.Certificate)) + } + return uris +} + func certURI(cert *x509.Certificate) string { if len(cert.URIs) == 1 { return cert.URIs[0].String() diff --git a/pkg/server/plugin/upstreamauthority/ejbca/ejbca.go b/pkg/server/plugin/upstreamauthority/ejbca/ejbca.go new file mode 100644 index 00000000000..57226c8f372 --- /dev/null +++ b/pkg/server/plugin/upstreamauthority/ejbca/ejbca.go @@ -0,0 +1,383 @@ +package ejbca + +import ( + "context" + "crypto/rand" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "math/big" + "os" + "sync" + + ejbcaclient "github.com/Keyfactor/ejbca-go-client-sdk/api/ejbca" + "github.com/hashicorp/go-hclog" + "github.com/spiffe/spire-plugin-sdk/pluginsdk" + upstreamauthorityv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/server/upstreamauthority/v1" + configv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/service/common/config/v1" + "github.com/spiffe/spire/pkg/common/catalog" + "github.com/spiffe/spire/pkg/common/coretypes/x509certificate" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ( + // This compile-time assertion ensures the plugin conforms properly to the + // pluginsdk.NeedsLogger interface. 
+ _ pluginsdk.NeedsLogger = (*Plugin)(nil) +) + +const ( + pluginName = "ejbca" +) + +type newEjbcaAuthenticatorFunc func(*Config) (ejbcaclient.Authenticator, error) +type getEnvFunc func(string) string +type readFileFunc func(string) ([]byte, error) + +// Plugin implements the UpstreamAuthority plugin +type Plugin struct { + // UnimplementedUpstreamAuthorityServer is embedded to satisfy gRPC + upstreamauthorityv1.UnimplementedUpstreamAuthorityServer + + // UnimplementedConfigServer is embedded to satisfy gRPC + configv1.UnimplementedConfigServer + + config *Config + configMtx sync.RWMutex + + // The logger received from the framework via the SetLogger method + logger hclog.Logger + + client ejbcaClient + + hooks struct { + newAuthenticator newEjbcaAuthenticatorFunc + getEnv getEnvFunc + readFile readFileFunc + } +} + +func BuiltIn() catalog.BuiltIn { + return builtin(New()) +} + +func builtin(p *Plugin) catalog.BuiltIn { + return catalog.MakeBuiltIn(pluginName, + upstreamauthorityv1.UpstreamAuthorityPluginServer(p), + configv1.ConfigServiceServer(p), + ) +} + +// Config defines the configuration for the plugin. 
+type Config struct { + Hostname string `hcl:"hostname" json:"hostname"` + CaCertPath string `hcl:"ca_cert_path" json:"ca_cert_path"` + ClientCertPath string `hcl:"client_cert_path" json:"client_cert_path"` + ClientCertKeyPath string `hcl:"client_cert_key_path" json:"client_cert_key_path"` + CAName string `hcl:"ca_name" json:"ca_name"` + EndEntityProfileName string `hcl:"end_entity_profile_name" json:"end_entity_profile_name"` + CertificateProfileName string `hcl:"certificate_profile_name" json:"certificate_profile_name"` + DefaultEndEntityName string `hcl:"end_entity_name" json:"end_entity_name"` + AccountBindingID string `hcl:"account_binding_id" json:"account_binding_id"` +} + +// New returns an instantiated EJBCA UpstreamAuthority plugin +func New() *Plugin { + p := &Plugin{} + p.hooks.newAuthenticator = p.getAuthenticator + p.hooks.getEnv = os.Getenv + p.hooks.readFile = os.ReadFile + return p +} + +// Configure configures the EJBCA UpstreamAuthority plugin. This is invoked by SPIRE when the plugin is +// first loaded. After the first invocation, it may be used to reconfigure the plugin. +func (p *Plugin) Configure(_ context.Context, req *configv1.ConfigureRequest) (*configv1.ConfigureResponse, error) { + config, err := p.parseConfig(req) + if err != nil { + return nil, err + } + + authenticator, err := p.hooks.newAuthenticator(config) + if err != nil { + return nil, err + } + + client, err := p.newEjbcaClient(config, authenticator) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "failed to create EJBCA client: %v", err) + } + + p.setConfig(config) + p.setClient(client) + return &configv1.ConfigureResponse{}, nil +} + +// SetLogger is called by the framework when the plugin is loaded and provides +// the plugin with a logger wired up to SPIRE's logging facilities. +func (p *Plugin) SetLogger(logger hclog.Logger) { + p.logger = logger +} + +// MintX509CAAndSubscribe implements the UpstreamAuthority MintX509CAAndSubscribe RPC. 
Mints an X.509 CA and responds +// with the signed X.509 CA certificate chain and upstream X.509 roots. The stream is kept open but new roots will +// not be published unless the CA is rotated and a new X.509 CA is minted. +// +// Implementation note: +// - It's important that the EJBCA Certificate Profile and End Entity Profile are properly configured before +// using this plugin. The plugin does not attempt to configure these profiles. +func (p *Plugin) MintX509CAAndSubscribe(req *upstreamauthorityv1.MintX509CARequest, stream upstreamauthorityv1.UpstreamAuthority_MintX509CAAndSubscribeServer) error { + var err error + if p.client == nil { + return status.Error(codes.FailedPrecondition, "ejbca upstreamauthority is not configured") + } + + logger := p.logger.Named("MintX509CAAndSubscribe") + config, err := p.getConfig() + if err != nil { + return err + } + + logger.Debug("Parsing CSR from request") + parsedCsr, err := x509.ParseCertificateRequest(req.Csr) + if err != nil { + return status.Errorf(codes.InvalidArgument, "unable to parse CSR: %v", err) + } + csrPem := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: req.Csr}) + + logger.Debug("Determining end entity name") + endEntityName, err := p.getEndEntityName(config, parsedCsr) + if err != nil { + return status.Errorf(codes.Internal, "unable to determine end entity name: %v", err) + } + + logger.Debug("Preparing EJBCA enrollment request") + password, err := generateRandomString(16) + if err != nil { + return status.Errorf(codes.Internal, "failed to generate random password: %v", err) + } + enrollConfig := ejbcaclient.NewEnrollCertificateRestRequest() + enrollConfig.SetUsername(endEntityName) + enrollConfig.SetPassword(password) + + // Configure the request using local state and the CSR + enrollConfig.SetCertificateRequest(string(csrPem)) + enrollConfig.SetCertificateAuthorityName(config.CAName) + enrollConfig.SetCertificateProfileName(config.CertificateProfileName) + 
enrollConfig.SetEndEntityProfileName(config.EndEntityProfileName) + enrollConfig.SetIncludeChain(true) + enrollConfig.SetAccountBindingId(config.AccountBindingID) + + logger.Debug("Prepared EJBCA enrollment request", "subject", parsedCsr.Subject.String(), "uriSANs", parsedCsr.URIs, "endEntityName", endEntityName, "caName", config.CAName, "certificateProfileName", config.CertificateProfileName, "endEntityProfileName", config.EndEntityProfileName, "accountBindingId", config.AccountBindingID) + + logger.Info("Enrolling certificate with EJBCA") + enrollResponse, httpResponse, err := p.client.EnrollPkcs10Certificate(stream.Context()). + EnrollCertificateRestRequest(*enrollConfig). + Execute() + if err != nil { + return p.parseEjbcaError("failed to enroll CSR", err) + } + if httpResponse != nil && httpResponse.Body != nil { + httpResponse.Body.Close() + } + + var certBytes []byte + var caBytes []byte + switch { + case enrollResponse.GetResponseFormat() == "PEM": + logger.Debug("EJBCA returned certificate in PEM format - serializing") + + block, _ := pem.Decode([]byte(enrollResponse.GetCertificate())) + if block == nil { + return status.Error(codes.Internal, "failed to parse certificate PEM") + } + certBytes = block.Bytes + + for _, ca := range enrollResponse.CertificateChain { + block, _ := pem.Decode([]byte(ca)) + if block == nil { + return status.Error(codes.Internal, "failed to parse CA certificate PEM") + } + caBytes = append(caBytes, block.Bytes...) + } + case enrollResponse.GetResponseFormat() == "DER": + logger.Debug("EJBCA returned certificate in DER format - serializing") + + bytes, err := base64.StdEncoding.DecodeString(enrollResponse.GetCertificate()) + if err != nil { + return status.Errorf(codes.Internal, "failed to base64 decode DER certificate: %v", err) + } + certBytes = append(certBytes, bytes...) 
+ + for _, ca := range enrollResponse.CertificateChain { + bytes, err := base64.StdEncoding.DecodeString(ca) + if err != nil { + return status.Errorf(codes.Internal, "failed to base64 decode DER CA certificate: %v", err) + } + caBytes = append(caBytes, bytes...) + } + default: + return status.Errorf(codes.Internal, "ejbca returned unsupported certificate format: %q", enrollResponse.GetResponseFormat()) + } + + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return status.Errorf(codes.Internal, "failed to serialize certificate issued by EJBCA: %v", err) + } + + caChain, err := x509.ParseCertificates(caBytes) + if err != nil { + return status.Errorf(codes.Internal, "failed to serialize CA chain returned by EJBCA: %v", err) + } + + if len(caChain) == 0 { + return status.Error(codes.Internal, "EJBCA did not return a CA chain") + } + + rootCa := caChain[len(caChain)-1] + logger.Debug("Retrieved root CA from CA chain", "rootCa", rootCa.Subject.String(), "intermediates", len(caChain)-1) + + // x509CertificateChain contains the leaf CA certificate, then any intermediates up to but not including the root CA. + x509CertificateAuthorityChain, err := x509certificate.ToPluginFromCertificates(append([]*x509.Certificate{cert}, caChain[:len(caChain)-1]...)) + if err != nil { + return status.Errorf(codes.Internal, "failed to serialize certificate chain: %v", err) + } + + rootCACertificate, err := x509certificate.ToPluginFromCertificates([]*x509.Certificate{rootCa}) + if err != nil { + return status.Errorf(codes.Internal, "failed to serialize upstream X.509 roots: %v", err) + } + + return stream.Send(&upstreamauthorityv1.MintX509CAResponse{ + X509CaChain: x509CertificateAuthorityChain, + UpstreamX509Roots: rootCACertificate, + }) +} + +// The EJBCA UpstreamAuthority plugin does not support publishing JWT keys. 
+func (p *Plugin) PublishJWTKeyAndSubscribe(_ *upstreamauthorityv1.PublishJWTKeyRequest, _ upstreamauthorityv1.UpstreamAuthority_PublishJWTKeyAndSubscribeServer) error { + return status.Error(codes.Unimplemented, "publishing JWT keys is not supported by the EJBCA UpstreamAuthority plugin") +} + +// setConfig replaces the configuration atomically under a write lock. +func (p *Plugin) setConfig(config *Config) { + p.configMtx.Lock() + p.config = config + p.configMtx.Unlock() +} + +// getConfig gets the configuration under a read lock. +func (p *Plugin) getConfig() (*Config, error) { + p.configMtx.RLock() + defer p.configMtx.RUnlock() + if p.config == nil { + return nil, status.Error(codes.FailedPrecondition, "not configured") + } + return p.config, nil +} + +// setClient replaces the client atomically under a write lock. +func (p *Plugin) setClient(client ejbcaClient) { + p.configMtx.Lock() + p.client = client + p.configMtx.Unlock() +} + +// getEndEntityName calculates the End Entity Name based on the default_end_entity_name from the EJBCA UpstreamAuthority +// configuration. The possible values are: +// - cn: Uses the Common Name from the CSR's Distinguished Name. +// - dns: Uses the first DNS Name from the CSR's Subject Alternative Names (SANs). +// - uri: Uses the first URI from the CSR's Subject Alternative Names (SANs). +// - ip: Uses the first IP Address from the CSR's Subject Alternative Names (SANs). +// - Custom Value: Any other string will be directly used as the End Entity Name. +// If the default_end_entity_name is not set, the plugin will determine the End Entity Name in the same order as above. +func (p *Plugin) getEndEntityName(config *Config, csr *x509.CertificateRequest) (string, error) { + logger := p.logger.Named("getEndEntityName") + + eeName := "" + // 1. If the endEntityName option is set, determine the end entity name based on the option + // 2. 
If the endEntityName option is not set, determine the end entity name based on the CSR + + // cn: Use the CommonName from the CertificateRequest's DN + if config.DefaultEndEntityName == "cn" || config.DefaultEndEntityName == "" { + if csr.Subject.CommonName != "" { + eeName = csr.Subject.CommonName + logger.Debug("Using CommonName from the CSR's DN as the EJBCA end entity name", "endEntityName", eeName) + return eeName, nil + } + } + + // dns: Use the first DNSName from the CertificateRequest's DNSNames SANs + if config.DefaultEndEntityName == "dns" || config.DefaultEndEntityName == "" { + if len(csr.DNSNames) > 0 && csr.DNSNames[0] != "" { + eeName = csr.DNSNames[0] + logger.Debug("Using the first DNSName from the CSR's DNSNames SANs as the EJBCA end entity name", "endEntityName", eeName) + return eeName, nil + } + } + + // uri: Use the first URI from the CertificateRequest's URI Sans + if config.DefaultEndEntityName == "uri" || config.DefaultEndEntityName == "" { + if len(csr.URIs) > 0 { + eeName = csr.URIs[0].String() + logger.Debug("Using the first URI from the CSR's URI Sans as the EJBCA end entity name", "endEntityName", eeName) + return eeName, nil + } + } + + // ip: Use the first IPAddress from the CertificateRequest's IPAddresses SANs + if config.DefaultEndEntityName == "ip" || config.DefaultEndEntityName == "" { + if len(csr.IPAddresses) > 0 { + eeName = csr.IPAddresses[0].String() + logger.Debug("Using the first IPAddress from the CSR's IPAddresses SANs as the EJBCA end entity name", "endEntityName", eeName) + return eeName, nil + } + } + + // End of defaults; if the endEntityName option is set to anything but cn, dns, or uri, use the option as the end entity name + if config.DefaultEndEntityName != "" && config.DefaultEndEntityName != "cn" && config.DefaultEndEntityName != "dns" && config.DefaultEndEntityName != "uri" { + eeName = config.DefaultEndEntityName + logger.Debug("Using the default_end_entity_name config value as the EJBCA end entity name", 
"endEntityName", eeName) + return eeName, nil + } + + // If we get here, we were unable to determine the end entity name + logger.Error(fmt.Sprintf("the endEntityName option is set to %q, but no valid end entity name could be determined from the CertificateRequest", config.DefaultEndEntityName)) + + return "", fmt.Errorf("no valid end entity name could be determined from the CertificateRequest") +} + +// parseEjbcaError parses an error returned by the EJBCA API and returns a gRPC status error. +func (p *Plugin) parseEjbcaError(detail string, err error) error { + if err == nil { + return nil + } + logger := p.logger.Named("parseEjbcaError") + errString := fmt.Sprintf("%s - %s", detail, err.Error()) + + ejbcaError := &ejbcaclient.GenericOpenAPIError{} + if errors.As(err, &ejbcaError) { + errString += fmt.Sprintf(" - EJBCA API returned error %s", ejbcaError.Body()) + } + + logger.Error("EJBCA returned an error", "error", errString) + + return status.Errorf(codes.Internal, "EJBCA returned an error: %s", errString) +} + +// generateRandomString generates a random string of the specified length +func generateRandomString(length int) (string, error) { + letters := []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + b := make([]rune, length) + for i := range b { + num, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters)))) + if err != nil { + return "", err + } + b[i] = letters[num.Int64()] + } + return string(b), nil +} diff --git a/pkg/server/plugin/upstreamauthority/ejbca/ejbca_client.go b/pkg/server/plugin/upstreamauthority/ejbca/ejbca_client.go new file mode 100644 index 00000000000..7d263120bb5 --- /dev/null +++ b/pkg/server/plugin/upstreamauthority/ejbca/ejbca_client.go @@ -0,0 +1,143 @@ +package ejbca + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + + ejbcaclient "github.com/Keyfactor/ejbca-go-client-sdk/api/ejbca" + "github.com/gogo/status" + "github.com/hashicorp/hcl" + configv1 
"github.com/spiffe/spire-plugin-sdk/proto/spire/service/common/config/v1" + "github.com/spiffe/spire/pkg/common/pemutil" + "google.golang.org/grpc/codes" +) + +type ejbcaClient interface { + EnrollPkcs10Certificate(ctx context.Context) ejbcaclient.ApiEnrollPkcs10CertificateRequest +} + +func (p *Plugin) parseConfig(req *configv1.ConfigureRequest) (*Config, error) { + logger := p.logger.Named("parseConfig") + config := new(Config) + logger.Debug("Decoding EJBCA configuration") + if err := hcl.Decode(&config, req.HclConfiguration); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "failed to decode configuration: %v", err) + } + + if config.Hostname == "" { + return nil, status.Error(codes.InvalidArgument, "hostname is required") + } + if config.CAName == "" { + return nil, status.Error(codes.InvalidArgument, "ca_name is required") + } + if config.EndEntityProfileName == "" { + return nil, status.Error(codes.InvalidArgument, "end_entity_profile_name is required") + } + if config.CertificateProfileName == "" { + return nil, status.Error(codes.InvalidArgument, "certificate_profile_name is required") + } + + // If ClientCertPath or ClientCertKeyPath were not found in the main server conf file, + // load them from the environment. + if config.ClientCertPath == "" { + config.ClientCertPath = p.hooks.getEnv("EJBCA_CLIENT_CERT_PATH") + } + if config.ClientCertKeyPath == "" { + config.ClientCertKeyPath = p.hooks.getEnv("EJBCA_CLIENT_CERT_KEY_PATH") + } + + // If ClientCertPath or ClientCertKeyPath were not present in either the conf file or + // the environment, return an error. 
+ if config.ClientCertPath == "" { + logger.Error("Client certificate is required for mTLS authentication") + return nil, status.Error(codes.InvalidArgument, "client_cert or EJBCA_CLIENT_CERT_PATH is required for mTLS authentication") + } + if config.ClientCertKeyPath == "" { + logger.Error("Client key is required for mTLS authentication") + return nil, status.Error(codes.InvalidArgument, "client_key or EJBCA_CLIENT_KEY_PATH is required for mTLS authentication") + } + + if config.CaCertPath == "" { + config.CaCertPath = p.hooks.getEnv("EJBCA_CA_CERT_PATH") + } + + return config, nil +} + +func (p *Plugin) getAuthenticator(config *Config) (ejbcaclient.Authenticator, error) { + var err error + logger := p.logger.Named("getAuthenticator") + + var caChain []*x509.Certificate + if config.CaCertPath != "" { + logger.Debug("Parsing CA chain from file", "path", config.CaCertPath) + caChainBytes, err := p.hooks.readFile(config.CaCertPath) + if err != nil { + return nil, fmt.Errorf("failed to read CA chain from file: %w", err) + } + + chain, err := pemutil.ParseCertificates(caChainBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse CA chain: %w", err) + } + + logger.Debug("Parsed CA chain", "length", len(caChain)) + caChain = chain + } + + logger.Debug("Creating mTLS authenticator") + + logger.Debug("Reading client certificate from file", "path", config.ClientCertPath) + clientCertBytes, err := p.hooks.readFile(config.ClientCertPath) + if err != nil { + return nil, fmt.Errorf("failed to read client certificate from file: %w", err) + } + logger.Debug("Reading client key from file", "path", config.ClientCertKeyPath) + clientKeyBytes, err := p.hooks.readFile(config.ClientCertKeyPath) + if err != nil { + return nil, fmt.Errorf("failed to read client key from file: %w", err) + } + + tlsCert, err := tls.X509KeyPair(clientCertBytes, clientKeyBytes) + if err != nil { + return nil, fmt.Errorf("failed to load client certificate: %w", err) + } + + authenticator, err := 
ejbcaclient.NewMTLSAuthenticatorBuilder(). + WithClientCertificate(&tlsCert). + WithCaCertificates(caChain). + Build() + if err != nil { + return nil, fmt.Errorf("failed to build MTLS authenticator: %w", err) + } + + logger.Debug("Created mTLS authenticator") + + return authenticator, nil +} + +// newEjbcaClient generates a new EJBCA client based on the provided configuration. +func (p *Plugin) newEjbcaClient(config *Config, authenticator ejbcaclient.Authenticator) (ejbcaClient, error) { + logger := p.logger.Named("newEjbcaClient") + if config == nil { + return nil, status.Error(codes.InvalidArgument, "config is required") + } + if authenticator == nil { + return nil, status.Error(codes.InvalidArgument, "authenticator is required") + } + + configuration := ejbcaclient.NewConfiguration() + configuration.Host = config.Hostname + + configuration.SetAuthenticator(authenticator) + + ejbcaClient, err := ejbcaclient.NewAPIClient(configuration) + if err != nil { + return nil, err + } + + logger.Info("Created EJBCA REST API client for EJBCA UpstreamAuthority plugin") + return ejbcaClient.V1CertificateApi, nil +} diff --git a/pkg/server/plugin/upstreamauthority/ejbca/ejbca_test.go b/pkg/server/plugin/upstreamauthority/ejbca/ejbca_test.go new file mode 100644 index 00000000000..5b740ffcfd9 --- /dev/null +++ b/pkg/server/plugin/upstreamauthority/ejbca/ejbca_test.go @@ -0,0 +1,815 @@ +package ejbca + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os" + "strings" + "testing" + "time" + + ejbcaclient "github.com/Keyfactor/ejbca-go-client-sdk/api/ejbca" + "github.com/hashicorp/go-hclog" + "github.com/spiffe/go-spiffe/v2/spiffeid" + commonutil "github.com/spiffe/spire/pkg/common/util" + "github.com/spiffe/spire/pkg/server/plugin/upstreamauthority" + "github.com/spiffe/spire/test/plugintest" + 
"github.com/spiffe/spire/test/spiretest" + "github.com/spiffe/spire/test/testkey" + "github.com/spiffe/spire/test/util" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" +) + +var ( + trustDomain = spiffeid.RequireTrustDomainFromString("example.org") +) + +type fakeEjbcaAuthenticator struct { + client *http.Client +} + +// GetHttpClient implements ejbcaclient.Authenticator +func (f *fakeEjbcaAuthenticator) GetHTTPClient() (*http.Client, error) { + return f.client, nil +} + +type fakeClientConfig struct { + testServer *httptest.Server +} + +func (f *fakeClientConfig) newFakeAuthenticator(_ *Config) (ejbcaclient.Authenticator, error) { + return &fakeEjbcaAuthenticator{ + client: f.testServer.Client(), + }, nil +} + +func TestConfigure(t *testing.T) { + ca, _, err := util.LoadCAFixture() + require.NoError(t, err) + + caPem := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: ca.Raw}) + + cert, key, err := util.LoadSVIDFixture() + require.NoError(t, err) + + certPem := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) + + keyByte, err := x509.MarshalECPrivateKey(key) + require.NoError(t, err) + keyPem := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyByte}) + + for i, tt := range []struct { + name string + getEnv getEnvFunc + readFile readFileFunc + config string + + expectedgRPCCode codes.Code + expectedMessagePrefix string + }{ + { + name: "No Hostname", + config: ` + ca_name = "Fake-Sub-CA" + client_cert_path = "/path/to/cert.crt" + client_cert_key_path = "/path/to/key.pem" + end_entity_profile_name = "fakeSpireIntermediateCAEEP" + certificate_profile_name = "fakeSubCACP" + default_end_entity_name = "cn" + account_binding_id = "spiffe://example.org/spire/agent/join_token/abcd" + `, + getEnv: os.Getenv, + readFile: func(key string) ([]byte, error) { + if key == "/path/to/cert.crt" { + return certPem, nil + } + if key == "/path/to/key.pem" { + return keyPem, nil + } + return nil, errors.New("file not 
found") + }, + expectedgRPCCode: codes.InvalidArgument, + }, + { + name: "No CA Name", + config: ` + hostname = "ejbca.example.org" + client_cert_path = "/path/to/cert.crt" + client_cert_key_path = "/path/to/key.pem" + end_entity_profile_name = "fakeSpireIntermediateCAEEP" + certificate_profile_name = "fakeSubCACP" + default_end_entity_name = "cn" + account_binding_id = "spiffe://example.org/spire/agent/join_token/abcd" + `, + getEnv: os.Getenv, + readFile: func(key string) ([]byte, error) { + if key == "/path/to/cert.crt" { + return certPem, nil + } + if key == "/path/to/key.pem" { + return keyPem, nil + } + return nil, errors.New("file not found") + }, + expectedgRPCCode: codes.InvalidArgument, + }, + { + name: "No End Entity Profile Name", + config: ` + hostname = "ejbca.example.org" + client_cert_path = "/path/to/cert.crt" + client_cert_key_path = "/path/to/key.pem" + ca_name = "Fake-Sub-CA" + certificate_profile_name = "fakeSubCACP" + default_end_entity_name = "cn" + account_binding_id = "spiffe://example.org/spire/agent/join_token/abcd" + `, + getEnv: os.Getenv, + readFile: func(key string) ([]byte, error) { + if key == "/path/to/cert.crt" { + return certPem, nil + } + if key == "/path/to/key.pem" { + return keyPem, nil + } + return nil, errors.New("file not found") + }, + expectedgRPCCode: codes.InvalidArgument, + }, + { + name: "No Certificate Profile Name", + config: ` + hostname = "ejbca.example.org" + client_cert_path = "/path/to/cert.crt" + client_cert_key_path = "/path/to/key.pem" + ca_name = "Fake-Sub-CA" + end_entity_profile_name = "fakeSpireIntermediateCAEEP" + default_end_entity_name = "cn" + account_binding_id = "spiffe://example.org/spire/agent/join_token/abcd" + `, + getEnv: os.Getenv, + readFile: func(key string) ([]byte, error) { + if key == "/path/to/cert.crt" { + return certPem, nil + } + if key == "/path/to/key.pem" { + return keyPem, nil + } + return nil, errors.New("file not found") + }, + expectedgRPCCode: codes.InvalidArgument, + }, + { 
+ name: "No Client Cert", + config: ` + hostname = "ejbca.example.org" + client_cert_key_path = "/path/to/key.pem" + ca_name = "Fake-Sub-CA" + end_entity_profile_name = "fakeSpireIntermediateCAEEP" + certificate_profile_name = "fakeSubCACP" + default_end_entity_name = "cn" + account_binding_id = "spiffe://example.org/spire/agent/join_token/abcd" + `, + getEnv: os.Getenv, + readFile: func(key string) ([]byte, error) { + if key == "/path/to/key.pem" { + return keyPem, nil + } + return nil, errors.New("file not found") + }, + expectedgRPCCode: codes.InvalidArgument, + }, + { + name: "No Client Key", + config: ` + hostname = "ejbca.example.org" + client_cert_path = "/path/to/cert.crt" + ca_name = "Fake-Sub-CA" + end_entity_profile_name = "fakeSpireIntermediateCAEEP" + certificate_profile_name = "fakeSubCACP" + default_end_entity_name = "cn" + account_binding_id = "spiffe://example.org/spire/agent/join_token/abcd" + `, + getEnv: os.Getenv, + readFile: func(key string) ([]byte, error) { + if key == "/path/to/cert.crt" { + return certPem, nil + } + return nil, errors.New("file not found") + }, + expectedgRPCCode: codes.InvalidArgument, + }, + { + name: "CA Cert path from env", + config: ` + hostname = "ejbca.example.org" + client_cert_path = "/path/to/cert.crt" + client_cert_key_path = "/path/to/key.pem" + ca_name = "Fake-Sub-CA" + end_entity_profile_name = "fakeSpireIntermediateCAEEP" + certificate_profile_name = "fakeSubCACP" + default_end_entity_name = "cn" + account_binding_id = "spiffe://example.org/spire/agent/join_token/abcd" + `, + getEnv: func(key string) string { + if key == "EJBCA_CA_CERT_PATH" { + return "/path/to/ca.crt" + } + return "" + }, + readFile: func(key string) ([]byte, error) { + if key == "/path/to/ca.crt" { + return caPem, nil + } + if key == "/path/to/cert.crt" { + return certPem, nil + } + if key == "/path/to/key.pem" { + return keyPem, nil + } + return nil, errors.New("file not found") + }, + expectedgRPCCode: codes.OK, + }, + { + name: "Client 
Cert path from env", + config: ` + hostname = "ejbca.example.org" + client_cert_key_path = "/path/to/key.pem" + ca_name = "Fake-Sub-CA" + end_entity_profile_name = "fakeSpireIntermediateCAEEP" + certificate_profile_name = "fakeSubCACP" + default_end_entity_name = "cn" + account_binding_id = "spiffe://example.org/spire/agent/join_token/abcd" + `, + getEnv: func(key string) string { + if key == "EJBCA_CLIENT_CERT_PATH" { + return "/path/to/cert.crt" + } + return "" + }, + readFile: func(key string) ([]byte, error) { + if key == "/path/to/cert.crt" { + return certPem, nil + } + if key == "/path/to/key.pem" { + return keyPem, nil + } + return nil, errors.New("file not found") + }, + expectedgRPCCode: codes.OK, + }, + { + name: "Client Key path from env", + config: ` + hostname = "ejbca.example.org" + client_cert_path = "/path/to/cert.crt" + ca_name = "Fake-Sub-CA" + end_entity_profile_name = "fakeSpireIntermediateCAEEP" + certificate_profile_name = "fakeSubCACP" + default_end_entity_name = "cn" + account_binding_id = "spiffe://example.org/spire/agent/join_token/abcd" + `, + getEnv: func(key string) string { + if key == "EJBCA_CLIENT_CERT_KEY_PATH" { + return "/path/to/key.pem" + } + return "" + }, + readFile: func(key string) ([]byte, error) { + if key == "/path/to/cert.crt" { + return certPem, nil + } + if key == "/path/to/key.pem" { + return keyPem, nil + } + return nil, errors.New("file not found") + }, + expectedgRPCCode: codes.OK, + }, + { + name: "CA, Client Cert, and Client Key path from env", + config: ` + hostname = "ejbca.example.org" + ca_name = "Fake-Sub-CA" + end_entity_profile_name = "fakeSpireIntermediateCAEEP" + certificate_profile_name = "fakeSubCACP" + default_end_entity_name = "cn" + account_binding_id = "spiffe://example.org/spire/agent/join_token/abcd" + `, + getEnv: func(key string) string { + if key == "EJBCA_CA_CERT_PATH" { + return "/path/to/ca.crt" + } + if key == "EJBCA_CLIENT_CERT_PATH" { + return "/path/to/cert.crt" + } + if key == 
"EJBCA_CLIENT_CERT_KEY_PATH" { + return "/path/to/key.pem" + } + return "" + }, + readFile: func(key string) ([]byte, error) { + if key == "/path/to/ca.crt" { + return caPem, nil + } + if key == "/path/to/cert.crt" { + return certPem, nil + } + if key == "/path/to/key.pem" { + return keyPem, nil + } + return nil, errors.New("file not found") + }, + expectedgRPCCode: codes.OK, + }, + { + name: "CA, Client Cert, and Client Key path from config", + config: ` + hostname = "ejbca.example.org" + ca_cert_path = "/path/to/ca.crt" + client_cert_path = "/path/to/cert.crt" + client_cert_key_path = "/path/to/key.pem" + ca_name = "Fake-Sub-CA" + end_entity_profile_name = "fakeSpireIntermediateCAEEP" + certificate_profile_name = "fakeSubCACP" + default_end_entity_name = "cn" + account_binding_id = "spiffe://example.org/spire/agent/join_token/abcd" + `, + getEnv: os.Getenv, + readFile: func(key string) ([]byte, error) { + if key == "/path/to/ca.crt" { + return caPem, nil + } + if key == "/path/to/cert.crt" { + return certPem, nil + } + if key == "/path/to/key.pem" { + return keyPem, nil + } + return nil, errors.New("file not found") + }, + expectedgRPCCode: codes.OK, + }, + } { + t.Run(tt.name, func(t *testing.T) { + var err error + p := New() + + p.hooks.getEnv = tt.getEnv + p.hooks.readFile = tt.readFile + + options := []plugintest.Option{ + plugintest.CaptureConfigureError(&err), + plugintest.Configure(tt.config), + } + + plugintest.Load(t, builtin(p), new(upstreamauthority.V1), options...) 
+ spiretest.RequireGRPCStatusHasPrefix(t, err, tt.expectedgRPCCode, tt.expectedMessagePrefix) + t.Logf("\ntestcase[%d] and err:%+v\n", i, err) + }) + } +} + +func TestMintX509CAAndSubscribe(t *testing.T) { + rootCA, _, err := util.LoadCAFixture() + require.NoError(t, err) + intermediateCA, _, err := util.LoadCAFixture() + require.NoError(t, err) + svidIssuingCA, _, err := util.LoadSVIDFixture() + require.NoError(t, err) + + for _, tt := range []struct { + name string + + // Config + certificateResponseFormat string + ejbcaStatusCode int + + // Request + caName string + endEntityProfileName string + certificateProfileName string + endEntityName string + accountBindingID string + + // Expected values + expectedgRPCCode codes.Code + expectedMessagePrefix string + expectedEndEntityName string + expectedCaAndChain []*x509.Certificate + expectedRootCAs []*x509.Certificate + }{ + { + name: "success_pem", + + certificateResponseFormat: "PEM", + ejbcaStatusCode: http.StatusOK, + + caName: "Fake-Sub-CA", + endEntityProfileName: "fakeSpireIntermediateCAEEP", + certificateProfileName: "fakeSubCACP", + endEntityName: "", + accountBindingID: "", + + expectedgRPCCode: codes.OK, + expectedMessagePrefix: "", + expectedEndEntityName: trustDomain.ID().String(), + expectedCaAndChain: []*x509.Certificate{svidIssuingCA, intermediateCA}, + expectedRootCAs: []*x509.Certificate{rootCA}, + }, + { + name: "success_der", + + certificateResponseFormat: "DER", + ejbcaStatusCode: http.StatusOK, + + caName: "Fake-Sub-CA", + endEntityProfileName: "fakeSpireIntermediateCAEEP", + certificateProfileName: "fakeSubCACP", + endEntityName: "", + accountBindingID: "", + + expectedgRPCCode: codes.OK, + expectedMessagePrefix: "", + expectedEndEntityName: trustDomain.ID().String(), + expectedCaAndChain: []*x509.Certificate{svidIssuingCA, intermediateCA}, + expectedRootCAs: []*x509.Certificate{rootCA}, + }, + { + name: "fail_unknown_format", + + certificateResponseFormat: "PKCS7", + + caName: "Fake-Sub-CA", + 
endEntityProfileName: "fakeSpireIntermediateCAEEP", + certificateProfileName: "fakeSubCACP", + endEntityName: "", + accountBindingID: "", + + expectedgRPCCode: codes.Internal, + expectedMessagePrefix: "upstreamauthority(ejbca): ejbca returned unsupported certificate format: \"PKCS7\"", + ejbcaStatusCode: http.StatusOK, + expectedEndEntityName: trustDomain.ID().String(), + expectedCaAndChain: []*x509.Certificate{svidIssuingCA, intermediateCA}, + expectedRootCAs: []*x509.Certificate{rootCA}, + }, + { + name: "success_ejbca_api_error", + + certificateResponseFormat: "PEM", + ejbcaStatusCode: http.StatusBadRequest, + + caName: "Fake-Sub-CA", + endEntityProfileName: "fakeSpireIntermediateCAEEP", + certificateProfileName: "fakeSubCACP", + endEntityName: "", + accountBindingID: "", + + expectedgRPCCode: codes.Internal, + expectedMessagePrefix: "upstreamauthority(ejbca): EJBCA returned an error: failed to enroll CSR - 400 Bad Request - EJBCA API returned error", + expectedEndEntityName: trustDomain.ID().String(), + expectedCaAndChain: []*x509.Certificate{svidIssuingCA, intermediateCA}, + expectedRootCAs: []*x509.Certificate{rootCA}, + }, + } { + t.Run(tt.name, func(t *testing.T) { + var err error + + testServer := httptest.NewTLSServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + enrollRestRequest := ejbcaclient.EnrollCertificateRestRequest{} + err := json.NewDecoder(r.Body).Decode(&enrollRestRequest) + require.NoError(t, err) + + // Perform assertions before fake enrollment + require.Equal(t, tt.caName, enrollRestRequest.GetCertificateAuthorityName()) + require.Equal(t, tt.endEntityProfileName, enrollRestRequest.GetEndEntityProfileName()) + require.Equal(t, tt.certificateProfileName, enrollRestRequest.GetCertificateProfileName()) + require.Equal(t, tt.accountBindingID, enrollRestRequest.GetAccountBindingId()) + require.Equal(t, tt.expectedEndEntityName, enrollRestRequest.GetUsername()) + + response := certificateRestResponseFromExpectedCerts(t, 
tt.expectedCaAndChain, tt.expectedRootCAs, tt.certificateResponseFormat) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(tt.ejbcaStatusCode) + err = json.NewEncoder(w).Encode(response) + require.NoError(t, err) + })) + defer testServer.Close() + + p := New() + ua := new(upstreamauthority.V1) + + clientConfig := fakeClientConfig{ + testServer: testServer, + } + p.hooks.newAuthenticator = clientConfig.newFakeAuthenticator + + config := &Config{ + Hostname: testServer.URL, + + // We populate the client cert path & client key path to random values since newFakeAuthenticator doesn't have + // any built-in authentication. + ClientCertPath: "/path/to/cert.crt", + ClientCertKeyPath: "/path/to/key.pem", + + CAName: tt.caName, + EndEntityProfileName: tt.endEntityProfileName, + CertificateProfileName: tt.certificateProfileName, + DefaultEndEntityName: tt.endEntityName, + AccountBindingID: tt.accountBindingID, + } + + options := []plugintest.Option{ + plugintest.CaptureConfigureError(&err), + plugintest.ConfigureJSON(config), + } + + plugintest.Load(t, builtin(p), ua, options...) 
+ require.NoError(t, err) + + priv := testkey.NewEC384(t) + csr, err := commonutil.MakeCSR(priv, trustDomain.ID()) + require.NoError(t, err) + + ctx := context.Background() + caAndChain, rootCAs, stream, err := ua.MintX509CA(ctx, csr, 30*time.Second) + spiretest.RequireGRPCStatusHasPrefix(t, err, tt.expectedgRPCCode, tt.expectedMessagePrefix) + if tt.expectedgRPCCode == codes.OK { + require.NotNil(t, stream) + require.NotNil(t, caAndChain) + require.NotNil(t, rootCAs) + } + }) + } +} + +func certificateRestResponseFromExpectedCerts(t *testing.T, issuingCaAndChain []*x509.Certificate, rootCAs []*x509.Certificate, format string) *ejbcaclient.CertificateRestResponse { + require.NotEqual(t, 0, len(issuingCaAndChain)) + var issuingCa string + if format == "PEM" { + issuingCa = string(pem.EncodeToMemory(&pem.Block{Bytes: issuingCaAndChain[0].Raw, Type: "CERTIFICATE"})) + } else { + issuingCa = base64.StdEncoding.EncodeToString(issuingCaAndChain[0].Raw) + } + + var caChain []string + if format == "PEM" { + for _, cert := range issuingCaAndChain[1:] { + caChain = append(caChain, string(pem.EncodeToMemory(&pem.Block{Bytes: cert.Raw, Type: "CERTIFICATE"}))) + } + for _, cert := range rootCAs { + caChain = append(caChain, string(pem.EncodeToMemory(&pem.Block{Bytes: cert.Raw, Type: "CERTIFICATE"}))) + } + } else { + for _, cert := range issuingCaAndChain[1:] { + caChain = append(caChain, base64.StdEncoding.EncodeToString(cert.Raw)) + } + for _, cert := range rootCAs { + caChain = append(caChain, base64.StdEncoding.EncodeToString(cert.Raw)) + } + } + + response := &ejbcaclient.CertificateRestResponse{} + response.SetResponseFormat(format) + response.SetCertificate(issuingCa) + response.SetCertificateChain(caChain) + return response +} + +func TestGetEndEntityName(t *testing.T) { + for _, tt := range []struct { + name string + + defaultEndEntityName string + + subject string + dnsNames []string + uris []string + ips []string + + expectedEndEntityName string + }{ + { + name: 
"defaultEndEntityName unset use cn", + defaultEndEntityName: "", + subject: "CN=purplecat.example.com", + dnsNames: []string{"reddog.example.com"}, + uris: []string{"https://blueelephant.example.com"}, + ips: []string{"192.168.1.1"}, + + expectedEndEntityName: "purplecat.example.com", + }, + { + name: "defaultEndEntityName unset use dns", + defaultEndEntityName: "", + subject: "", + dnsNames: []string{"reddog.example.com"}, + uris: []string{"https://blueelephant.example.com"}, + ips: []string{"192.168.1.1"}, + + expectedEndEntityName: "reddog.example.com", + }, + { + name: "defaultEndEntityName unset use uri", + defaultEndEntityName: "", + subject: "", + dnsNames: []string{""}, + uris: []string{"https://blueelephant.example.com"}, + ips: []string{"192.168.1.1"}, + + expectedEndEntityName: "https://blueelephant.example.com", + }, + { + name: "defaultEndEntityName unset use ip", + defaultEndEntityName: "", + subject: "", + dnsNames: []string{""}, + uris: []string{""}, + ips: []string{"192.168.1.1"}, + + expectedEndEntityName: "192.168.1.1", + }, + { + name: "defaultEndEntityName set use cn", + defaultEndEntityName: "cn", + subject: "CN=purplecat.example.com", + dnsNames: []string{"reddog.example.com"}, + uris: []string{"https://blueelephant.example.com"}, + ips: []string{"192.168.1.1"}, + + expectedEndEntityName: "purplecat.example.com", + }, + { + name: "defaultEndEntityName set use dns", + defaultEndEntityName: "dns", + subject: "CN=purplecat.example.com", + dnsNames: []string{"reddog.example.com"}, + uris: []string{"https://blueelephant.example.com"}, + ips: []string{"192.168.1.1"}, + + expectedEndEntityName: "reddog.example.com", + }, + { + name: "defaultEndEntityName set use uri", + defaultEndEntityName: "uri", + subject: "CN=purplecat.example.com", + dnsNames: []string{"reddog.example.com"}, + uris: []string{"https://blueelephant.example.com"}, + ips: []string{"192.168.1.1"}, + + expectedEndEntityName: "https://blueelephant.example.com", + }, + { + name: 
"defaultEndEntityName set use ip", + defaultEndEntityName: "ip", + subject: "CN=purplecat.example.com", + dnsNames: []string{"reddog.example.com"}, + uris: []string{"https://blueelephant.example.com"}, + ips: []string{"192.168.1.1"}, + + expectedEndEntityName: "192.168.1.1", + }, + { + name: "defaultEndEntityName set use custom", + defaultEndEntityName: "aNonStandardValue", + subject: "CN=purplecat.example.com", + dnsNames: []string{"reddog.example.com"}, + uris: []string{"https://blueelephant.example.com"}, + ips: []string{"192.168.1.1"}, + + expectedEndEntityName: "aNonStandardValue", + }, + } { + t.Run(tt.name, func(t *testing.T) { + config := &Config{ + Hostname: "ejbca.example.com", + ClientCertPath: "/path/to/cert.crt", + ClientCertKeyPath: "/path/to/key.pem", + CAName: "Fake-Sub-CA", + EndEntityProfileName: "fakeSpireIntermediateCAEEP", + CertificateProfileName: "fakeSubCACP", + DefaultEndEntityName: tt.defaultEndEntityName, + AccountBindingID: "", + } + + csr, err := generateCSR(tt.subject, tt.dnsNames, tt.uris, tt.ips) + require.NoError(t, err) + + p := New() + + logOptions := hclog.DefaultOptions + logOptions.Level = hclog.Debug + p.SetLogger(hclog.Default()) + + endEntityName, err := p.getEndEntityName(config, csr) + require.NoError(t, err) + require.Equal(t, tt.expectedEndEntityName, endEntityName) + }) + } +} + +func generateCSR(subject string, dnsNames []string, uris []string, ipAddresses []string) (*x509.CertificateRequest, error) { + keyBytes, _ := rsa.GenerateKey(rand.Reader, 2048) + + var name pkix.Name + + if subject != "" { + // Split the subject into its individual parts + parts := strings.Split(subject, ",") + + for _, part := range parts { + // Split the part into key and value + keyValue := strings.SplitN(part, "=", 2) + + if len(keyValue) != 2 { + return nil, errors.New("invalid subject") + } + + key := strings.TrimSpace(keyValue[0]) + value := strings.TrimSpace(keyValue[1]) + + // Map the key to the appropriate field in the pkix.Name 
struct + switch key { + case "C": + name.Country = []string{value} + case "ST": + name.Province = []string{value} + case "L": + name.Locality = []string{value} + case "O": + name.Organization = []string{value} + case "OU": + name.OrganizationalUnit = []string{value} + case "CN": + name.CommonName = value + default: + // Ignore any unknown keys + } + } + } + + template := x509.CertificateRequest{ + Subject: name, + SignatureAlgorithm: x509.SHA256WithRSA, + } + + if len(dnsNames) > 0 { + template.DNSNames = dnsNames + } + + // Parse and add URIs + var uriPointers []*url.URL + for _, u := range uris { + if u == "" { + continue + } + uriPointer, err := url.Parse(u) + if err != nil { + return nil, err + } + uriPointers = append(uriPointers, uriPointer) + } + template.URIs = uriPointers + + // Parse and add IPAddresses + var ipAddrs []net.IP + for _, ipStr := range ipAddresses { + if ipStr == "" { + continue + } + ip := net.ParseIP(ipStr) + if ip == nil { + return nil, fmt.Errorf("invalid IP address: %s", ipStr) + } + ipAddrs = append(ipAddrs, ip) + } + template.IPAddresses = ipAddrs + + // Generate the CSR + csrBytes, err := x509.CreateCertificateRequest(rand.Reader, &template, keyBytes) + if err != nil { + return nil, err + } + + parsedCSR, err := x509.ParseCertificateRequest(csrBytes) + if err != nil { + return nil, err + } + + return parsedCSR, nil +} diff --git a/pkg/server/plugin/upstreamauthority/gcpcas/gcpcas.go b/pkg/server/plugin/upstreamauthority/gcpcas/gcpcas.go index 865cee5041f..15891380d98 100644 --- a/pkg/server/plugin/upstreamauthority/gcpcas/gcpcas.go +++ b/pkg/server/plugin/upstreamauthority/gcpcas/gcpcas.go @@ -333,7 +333,7 @@ func (p *Plugin) mintX509CA(ctx context.Context, csr []byte, preferredTTL int32) fullChain := []*x509.Certificate{cert} fullChain = append(fullChain, certChain[:len(certChain)-1]...) 
- x509CAChain, err := x509certificate.ToPluginProtos(fullChain) + x509CAChain, err := x509certificate.ToPluginFromCertificates(fullChain) if err != nil { return nil, status.Errorf(codes.Internal, "unable to form response X.509 CA chain: %v", err) } @@ -358,7 +358,7 @@ func (p *Plugin) mintX509CA(ctx context.Context, csr []byte, preferredTTL int32) // We may well have specified multiple paths to the same root. rootBundle = x509util.DedupeCertificates(rootBundle) - upstreamX509Roots, err := x509certificate.ToPluginProtos(rootBundle) + upstreamX509Roots, err := x509certificate.ToPluginFromCertificates(rootBundle) if err != nil { return nil, status.Errorf(codes.Internal, "unable to form response upstream X.509 roots: %v", err) } diff --git a/pkg/server/plugin/upstreamauthority/gcpcas/gcpcas_test.go b/pkg/server/plugin/upstreamauthority/gcpcas/gcpcas_test.go index c672cfca1e0..8c5de8a7da7 100644 --- a/pkg/server/plugin/upstreamauthority/gcpcas/gcpcas_test.go +++ b/pkg/server/plugin/upstreamauthority/gcpcas/gcpcas_test.go @@ -158,11 +158,11 @@ func TestGcpCAS(t *testing.T) { require.NotNil(t, x509Authorities) // Confirm that we don't have unexpected CAs require.Equal(t, 2, len(x509Authorities)) - require.Equal(t, "caX", x509Authorities[0].Subject.CommonName) - require.Equal(t, "caX", x509Authorities[0].Issuer.CommonName) + require.Equal(t, "caX", x509Authorities[0].Certificate.Subject.CommonName) + require.Equal(t, "caX", x509Authorities[0].Certificate.Issuer.CommonName) // We intentionally return the root externalcaY rather than intermediate caZ - require.Equal(t, "externalcaY", x509Authorities[1].Subject.CommonName) - require.Equal(t, "externalcaY", x509Authorities[1].Issuer.CommonName) + require.Equal(t, "externalcaY", x509Authorities[1].Certificate.Subject.CommonName) + require.Equal(t, "externalcaY", x509Authorities[1].Certificate.Issuer.CommonName) require.NotNil(t, x509CA) require.Equal(t, 1, len(x509CA)) @@ -170,8 +170,8 @@ func TestGcpCAS(t *testing.T) { 
require.Equal(t, "caX", x509CA[0].Issuer.CommonName) rootPool := x509.NewCertPool() - rootPool.AddCert(x509Authorities[0]) - rootPool.AddCert(x509Authorities[1]) + rootPool.AddCert(x509Authorities[0].Certificate) + rootPool.AddCert(x509Authorities[1].Certificate) var opt x509.VerifyOptions opt.Roots = rootPool res, err := x509CA[0].Verify(opt) diff --git a/pkg/server/plugin/upstreamauthority/spire/fake_handlers_test.go b/pkg/server/plugin/upstreamauthority/spire/fake_handlers_test.go index bc2444c3c35..7e127a558bf 100644 --- a/pkg/server/plugin/upstreamauthority/spire/fake_handlers_test.go +++ b/pkg/server/plugin/upstreamauthority/spire/fake_handlers_test.go @@ -66,10 +66,10 @@ type testHandler struct { sAPIServer *handler } -func (h *testHandler) startTestServers(t *testing.T, ca *testca.CA, serverCert []*x509.Certificate, serverKey crypto.Signer, +func (h *testHandler) startTestServers(t *testing.T, clk clock.Clock, ca *testca.CA, serverCert []*x509.Certificate, serverKey crypto.Signer, svidCert []byte, svidKey []byte) { h.wAPIServer = &whandler{cert: serverCert, key: serverKey, ca: ca, svidCert: svidCert, svidKey: svidKey} - h.sAPIServer = &handler{cert: serverCert, key: serverKey, ca: ca} + h.sAPIServer = &handler{clock: clk, cert: serverCert, key: serverKey, ca: ca} h.sAPIServer.startServerAPITestServer(t) h.wAPIServer.startWAPITestServer(t) } @@ -166,7 +166,7 @@ func (h *handler) appendKey(key *types.JWTKey) *types.Bundle { return cloneBundle(h.bundle) } -func (h *handler) appendRootCA(rootCA *types.X509Certificate) *types.Bundle { +func (h *handler) appendRootCA(rootCA *types.X509Certificate) *types.Bundle { //nolint: unparam // Keeping return for future use h.mtx.Lock() defer h.mtx.Unlock() h.bundle.X509Authorities = append(h.bundle.X509Authorities, rootCA) diff --git a/pkg/server/plugin/upstreamauthority/spire/spire.go b/pkg/server/plugin/upstreamauthority/spire/spire.go index 1df3d7513a7..7d2a849150b 100644 --- 
a/pkg/server/plugin/upstreamauthority/spire/spire.go +++ b/pkg/server/plugin/upstreamauthority/spire/spire.go @@ -30,8 +30,6 @@ const ( internalPollFreq = time.Second ) -var clk clock.Clock = clock.New() - type Configuration struct { ServerAddr string `hcl:"server_address" json:"server_address"` ServerPort string `hcl:"server_port" json:"server_port"` @@ -58,6 +56,7 @@ type Plugin struct { upstreamauthorityv1.UnsafeUpstreamAuthorityServer configv1.UnsafeConfigServer + clk clock.Clock log hclog.Logger mtx sync.RWMutex @@ -78,6 +77,7 @@ type Plugin struct { func New() *Plugin { return &Plugin{ + clk: clock.New(), currentBundle: &plugintypes.Bundle{}, } } @@ -139,19 +139,21 @@ func (p *Plugin) MintX509CAAndSubscribe(request *upstreamauthorityv1.MintX509CAR } defer p.unsubscribeToPolling() - certChain, roots, err := p.serverClient.newDownstreamX509CA(stream.Context(), request.Csr, request.PreferredTtl) + // TODO: downstream RPC is not returning authority metadata, like tainted bit + // avoid using it for now in favor of a call to get bundle RPC + certChain, _, err := p.serverClient.newDownstreamX509CA(stream.Context(), request.Csr, request.PreferredTtl) if err != nil { return status.Errorf(codes.Internal, "unable to request a new Downstream X509CA: %v", err) } - var bundles []*plugintypes.X509Certificate - for _, cert := range roots { - pluginCert, err := x509certificate.ToPluginProto(cert) - if err != nil { - return status.Errorf(codes.Internal, "failed to parse X.509 authorities: %v", err) - } + serverBundle, err := p.serverClient.getBundle(stream.Context()) + if err != nil { + return status.Errorf(codes.Internal, "failed to fetch bundle from upstream server: %v", err) + } - bundles = append(bundles, pluginCert) + bundles, err := x509certificate.ToPluginFromAPIProtos(serverBundle.X509Authorities) + if err != nil { + return status.Errorf(codes.Internal, "failed to parse X.509 authorities: %v", err) } // Set X509 Authorities @@ -159,12 +161,12 @@ func (p *Plugin) 
MintX509CAAndSubscribe(request *upstreamauthorityv1.MintX509CAR rootCAs := []*plugintypes.X509Certificate{} - x509CAChain, err := x509certificate.ToPluginProtos(certChain) + x509CAChain, err := x509certificate.ToPluginFromCertificates(certChain) if err != nil { return status.Errorf(codes.Internal, "unable to form response X.509 CA chain: %v", err) } - ticker := clk.Ticker(internalPollFreq) + ticker := p.clk.Ticker(internalPollFreq) defer ticker.Stop() for { newRootCAs := p.getBundle().X509Authorities @@ -222,7 +224,7 @@ func (p *Plugin) PublishJWTKeyAndSubscribe(req *upstreamauthorityv1.PublishJWTKe p.setBundleJWTAuthorities(jwtKeys) keys := []*plugintypes.JWTKey{} - ticker := clk.Ticker(internalPollFreq) + ticker := p.clk.Ticker(internalPollFreq) defer ticker.Stop() for { newKeys := p.getBundle().JwtAuthorities @@ -246,7 +248,7 @@ func (p *Plugin) PublishJWTKeyAndSubscribe(req *upstreamauthorityv1.PublishJWTKe } func (p *Plugin) pollBundleUpdates(ctx context.Context) { - ticker := clk.Ticker(upstreamPollFreq) + ticker := p.clk.Ticker(upstreamPollFreq) defer ticker.Stop() for { preFetchCallVersion := p.getBundleVersion() diff --git a/pkg/server/plugin/upstreamauthority/spire/spire_test.go b/pkg/server/plugin/upstreamauthority/spire/spire_test.go index 7b1fa2a753f..86c147e3b3b 100644 --- a/pkg/server/plugin/upstreamauthority/spire/spire_test.go +++ b/pkg/server/plugin/upstreamauthority/spire/spire_test.go @@ -13,6 +13,7 @@ import ( svidv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/svid/v1" "github.com/spiffe/spire-api-sdk/proto/spire/api/types" "github.com/spiffe/spire/pkg/common/catalog" + "github.com/spiffe/spire/pkg/common/coretypes/x509certificate" "github.com/spiffe/spire/pkg/common/cryptoutil" "github.com/spiffe/spire/pkg/common/x509svid" "github.com/spiffe/spire/pkg/server/plugin/upstreamauthority" @@ -140,6 +141,31 @@ func TestMintX509CA(t *testing.T) { serverCertUpdate, _ := ca.CreateX509Certificate( 
testca.WithID(spiffeid.RequireFromPath(trustDomain, "/another")), ) + serverCertUpdateTainted, _ := ca.CreateX509Certificate( + testca.WithID(spiffeid.RequireFromPath(trustDomain, "/another")), + ) + expectedServerUpdateAuthority := []*x509certificate.X509Authority{ + { + Certificate: serverCertUpdate[0], + }, + { + Certificate: serverCertUpdateTainted[0], + Tainted: true, + }, + } + + certToAuthority := func(certs []*x509.Certificate) []*x509certificate.X509Authority { + var authorities []*x509certificate.X509Authority + for _, eachCert := range certs { + authorities = append(authorities, &x509certificate.X509Authority{ + Certificate: eachCert, + }) + } + return authorities + } + // TODO: since now we can taint authorities may we add this feature + // to go-spiffe? + expectedX509Authorities := certToAuthority(ca.Bundle().X509Authorities()) csr, pubKey, err := util.NewCSRTemplate(trustDomain.IDString()) require.NoError(t, err) @@ -237,9 +263,11 @@ func TestMintX509CA(t *testing.T) { for _, c := range cases { c := c t.Run(c.name, func(t *testing.T) { + mockClock := clock.NewMock(t) + // Setup servers server := testHandler{} - server.startTestServers(t, ca, serverCert, serverKey, svidCert, svidKey) + server.startTestServers(t, mockClock, ca, serverCert, serverKey, svidCert, svidKey) server.sAPIServer.setError(c.sAPIError) server.sAPIServer.setDownstreamResponse(c.downstreamResp) @@ -252,7 +280,7 @@ func TestMintX509CA(t *testing.T) { workloadAPIAddr = c.customWorkloadAPIAddr } - ua, mockClock := newWithDefault(t, serverAddr, workloadAPIAddr) + ua := newWithDefault(t, mockClock, serverAddr, workloadAPIAddr) server.sAPIServer.clock = mockClock // Send initial request and get stream @@ -270,7 +298,7 @@ func TestMintX509CA(t *testing.T) { return } - require.Equal(t, ca.X509Bundle().X509Authorities(), x509Authorities) + require.Equal(t, expectedX509Authorities, x509Authorities) wantTTL := c.ttl if wantTTL == 0 { @@ -289,6 +317,7 @@ func TestMintX509CA(t *testing.T) { // 
the upstream poll frequency twice to ensure the plugin picks up // the change to the bundle. server.sAPIServer.appendRootCA(&types.X509Certificate{Asn1: serverCertUpdate[0].Raw}) + server.sAPIServer.appendRootCA(&types.X509Certificate{Asn1: serverCertUpdateTainted[0].Raw, Tainted: true}) mockClock.Add(upstreamPollFreq) mockClock.Add(upstreamPollFreq) mockClock.Add(internalPollFreq) @@ -297,8 +326,7 @@ func TestMintX509CA(t *testing.T) { bundleUpdateResp, err := stream.RecvUpstreamX509Authorities() require.NoError(t, err) - expectBundles := append(ca.X509Authorities(), serverCertUpdate...) - require.Equal(t, expectBundles, bundleUpdateResp) + require.Equal(t, append(expectedX509Authorities, expectedServerUpdateAuthority...), bundleUpdateResp) // Cancel ctx to stop getting updates cancel() @@ -331,9 +359,10 @@ func TestPublishJWTKey(t *testing.T) { require.NoError(t, err) // Setup servers + mockClock := clock.NewMock(t) server := testHandler{} - server.startTestServers(t, ca, serverCert, serverKey, svidCert, svidKey) - ua, mockClock := newWithDefault(t, server.sAPIServer.addr, server.wAPIServer.workloadAPIAddr) + server.startTestServers(t, mockClock, ca, serverCert, serverKey, svidCert, svidKey) + ua := newWithDefault(t, mockClock, server.sAPIServer.addr, server.wAPIServer.workloadAPIAddr) // Get first response ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -385,7 +414,7 @@ func TestPublishJWTKey(t *testing.T) { spiretest.RequireGRPCStatusHasPrefix(t, err, codes.Internal, "upstreamauthority(spire): failed to push JWT authority: rpc error: code = Unknown desc = some erro") } -func newWithDefault(t *testing.T, serverAddr string, workloadAPIAddr net.Addr) (*upstreamauthority.V1, *clock.Mock) { +func newWithDefault(t *testing.T, mockClock *clock.Mock, serverAddr string, workloadAPIAddr net.Addr) *upstreamauthority.V1 { host, port, _ := net.SplitHostPort(serverAddr) config := Configuration{ ServerAddr: host, @@ -393,19 +422,18 @@ func 
newWithDefault(t *testing.T, serverAddr string, workloadAPIAddr net.Addr) ( } setWorkloadAPIAddr(&config, workloadAPIAddr) + p := New() + p.clk = mockClock + ua := new(upstreamauthority.V1) - plugintest.Load(t, BuiltIn(), ua, + plugintest.Load(t, builtin(p), ua, plugintest.CoreConfig(catalog.CoreConfig{ TrustDomain: trustDomain, }), plugintest.ConfigureJSON(config), ) - mockClock := clock.NewMock(t) - - clk = mockClock - - return ua, mockClock + return ua } func certChainURIs(chain []*x509.Certificate) []string { diff --git a/pkg/server/plugin/upstreamauthority/upstreamauthority.go b/pkg/server/plugin/upstreamauthority/upstreamauthority.go index abfa2f99718..a35257c2b2f 100644 --- a/pkg/server/plugin/upstreamauthority/upstreamauthority.go +++ b/pkg/server/plugin/upstreamauthority/upstreamauthority.go @@ -6,6 +6,7 @@ import ( "time" "github.com/spiffe/spire/pkg/common/catalog" + "github.com/spiffe/spire/pkg/common/coretypes/x509certificate" "github.com/spiffe/spire/proto/spire/common" ) @@ -20,7 +21,7 @@ type UpstreamAuthority interface { // closed when the caller is no longer interested in updates. If the // upstream authority does not support streaming updates, the stream will // return io.EOF when called. - MintX509CA(ctx context.Context, csr []byte, preferredTTL time.Duration) (x509CA, upstreamX509Authorities []*x509.Certificate, stream UpstreamX509AuthorityStream, err error) + MintX509CA(ctx context.Context, csr []byte, preferredTTL time.Duration) (x509CA []*x509.Certificate, upstreamX509Authorities []*x509certificate.X509Authority, stream UpstreamX509AuthorityStream, err error) // PublishJWTKey publishes the given JWT key with the upstream authority. // Support for this method is optional. Implementations that do not support @@ -39,7 +40,7 @@ type UpstreamX509AuthorityStream interface { // method is called, or the context originally passed into MintX509CA is // canceled. If the function returns an error, no more updates will be // available over the stream. 
- RecvUpstreamX509Authorities() ([]*x509.Certificate, error) + RecvUpstreamX509Authorities() ([]*x509certificate.X509Authority, error) // Close() closes the stream. It MUST be called by callers of MintX509CA // when they are done with the stream. diff --git a/pkg/server/plugin/upstreamauthority/v1.go b/pkg/server/plugin/upstreamauthority/v1.go index 9ecf7eb6db2..74446718586 100644 --- a/pkg/server/plugin/upstreamauthority/v1.go +++ b/pkg/server/plugin/upstreamauthority/v1.go @@ -23,7 +23,7 @@ type V1 struct { // MintX509CA provides the V1 implementation of the UpstreamAuthority // interface method of the same name. -func (v1 *V1) MintX509CA(ctx context.Context, csr []byte, preferredTTL time.Duration) (_, _ []*x509.Certificate, _ UpstreamX509AuthorityStream, err error) { +func (v1 *V1) MintX509CA(ctx context.Context, csr []byte, preferredTTL time.Duration) (_ []*x509.Certificate, _ []*x509certificate.X509Authority, _ UpstreamX509AuthorityStream, err error) { ctx, cancel := context.WithCancel(ctx) defer func() { // Only cancel the context if the function fails. Otherwise the @@ -51,6 +51,7 @@ func (v1 *V1) MintX509CA(ctx context.Context, csr []byte, preferredTTL time.Dura return nil, nil, nil, err } + // TODO: may we add a new type to get upstream authority with metadata? 
return x509CA, upstreamX509Authorities, &v1UpstreamX509AuthorityStream{v1: v1, stream: stream, cancel: cancel}, nil } @@ -91,7 +92,7 @@ func (v1 *V1) PublishJWTKey(ctx context.Context, jwtKey *common.PublicKey) (_ [] return jwtKeys, &v1UpstreamJWTAuthorityStream{v1: v1, stream: stream, cancel: cancel}, nil } -func (v1 *V1) parseMintX509CAFirstResponse(resp *upstreamauthorityv1.MintX509CAResponse) ([]*x509.Certificate, []*x509.Certificate, error) { +func (v1 *V1) parseMintX509CAFirstResponse(resp *upstreamauthorityv1.MintX509CAResponse) ([]*x509.Certificate, []*x509certificate.X509Authority, error) { x509CA, err := x509certificate.FromPluginProtos(resp.X509CaChain) if err != nil { return nil, nil, v1.Errorf(codes.Internal, "plugin response has malformed X.509 CA chain: %v", err) @@ -99,21 +100,27 @@ func (v1 *V1) parseMintX509CAFirstResponse(resp *upstreamauthorityv1.MintX509CAR if len(x509CA) == 0 { return nil, nil, v1.Error(codes.Internal, "plugin response missing X.509 CA chain") } + + intermediateAuthorities := make([]*x509.Certificate, 0, len(x509CA)) + for _, eachCA := range x509CA { + intermediateAuthorities = append(intermediateAuthorities, eachCA.Certificate) + } + x509Authorities, err := v1.parseX509Authorities(resp.UpstreamX509Roots) if err != nil { return nil, nil, err } - return x509CA, x509Authorities, nil + return intermediateAuthorities, x509Authorities, nil } -func (v1 *V1) parseMintX509CABundleUpdate(resp *upstreamauthorityv1.MintX509CAResponse) ([]*x509.Certificate, error) { +func (v1 *V1) parseMintX509CABundleUpdate(resp *upstreamauthorityv1.MintX509CAResponse) ([]*x509certificate.X509Authority, error) { if len(resp.X509CaChain) > 0 { return nil, v1.Error(codes.Internal, "plugin response has an X.509 CA chain after the first response") } return v1.parseX509Authorities(resp.UpstreamX509Roots) } -func (v1 *V1) parseX509Authorities(rawX509Authorities []*types.X509Certificate) ([]*x509.Certificate, error) { +func (v1 *V1) 
parseX509Authorities(rawX509Authorities []*types.X509Certificate) ([]*x509certificate.X509Authority, error) { x509Authorities, err := x509certificate.FromPluginProtos(rawX509Authorities) if err != nil { return nil, v1.Errorf(codes.Internal, "plugin response has malformed upstream X.509 roots: %v", err) @@ -145,7 +152,7 @@ type v1UpstreamX509AuthorityStream struct { cancel context.CancelFunc } -func (s *v1UpstreamX509AuthorityStream) RecvUpstreamX509Authorities() ([]*x509.Certificate, error) { +func (s *v1UpstreamX509AuthorityStream) RecvUpstreamX509Authorities() ([]*x509certificate.X509Authority, error) { for { resp, err := s.stream.Recv() switch { diff --git a/pkg/server/plugin/upstreamauthority/v1_test.go b/pkg/server/plugin/upstreamauthority/v1_test.go index f1af3e28cd5..159001b0c06 100644 --- a/pkg/server/plugin/upstreamauthority/v1_test.go +++ b/pkg/server/plugin/upstreamauthority/v1_test.go @@ -45,13 +45,23 @@ func TestV1MintX509CA(t *testing.T) { x509CA := upstreamCA.ChildCA() expectedX509CAChain := x509CA.X509Authorities() - expectedUpstreamX509Roots := upstreamCA.X509Authorities() + var expectedUpstreamX509Roots []*x509certificate.X509Authority + for _, eachCert := range upstreamCA.X509Authorities() { + expectedUpstreamX509Roots = append(expectedUpstreamX509Roots, &x509certificate.X509Authority{ + Certificate: eachCert, + }) + } + taintedUpstreamX509Roots := []*x509certificate.X509Authority{ + { + Certificate: expectedUpstreamX509Roots[0].Certificate, + Tainted: true, + }, + } - validX509CAChain := x509certificate.RequireToPluginProtos(expectedX509CAChain) + validX509CAChain := x509certificate.RequireToPluginFromCertificates(expectedX509CAChain) validUpstreamX509Roots := x509certificate.RequireToPluginProtos(expectedUpstreamX509Roots) malformedX509CAChain := []*types.X509Certificate{{Asn1: []byte("OHNO")}} malformedUpstreamX509Roots := []*types.X509Certificate{{Asn1: []byte("OHNO")}} - withoutX509CAChain := &upstreamauthorityv1.MintX509CAResponse{ 
X509CaChain: nil, UpstreamX509Roots: validUpstreamX509Roots, @@ -72,18 +82,28 @@ func TestV1MintX509CA(t *testing.T) { X509CaChain: validX509CAChain, UpstreamX509Roots: validUpstreamX509Roots, } + withTaintedUpstreamX509Roots := &upstreamauthorityv1.MintX509CAResponse{ + X509CaChain: validX509CAChain, + UpstreamX509Roots: []*types.X509Certificate{ + { + Asn1: validUpstreamX509Roots[0].Asn1, + Tainted: true, + }, + }, + } builder := BuildV1() for _, tt := range []struct { - test string - builder *V1Builder - expectCode codes.Code - expectMessage string - expectStreamUpdates bool - expectStreamCode codes.Code - expectStreamMessage string - expectLogs []spiretest.LogEntry + test string + builder *V1Builder + expectCode codes.Code + expectMessage string + expectStreamUpdates bool + expectStreamCode codes.Code + expectStreamMessage string + expectLogs []spiretest.LogEntry + expectUpstreamX509RootsResponse []*x509certificate.X509Authority }{ { test: "plugin returns before sending first response", @@ -138,6 +158,15 @@ func TestV1MintX509CA(t *testing.T) { expectStreamCode: codes.OK, expectStreamMessage: "", }, + { + test: "success with tainted authority", + builder: builder. + WithMintX509CAResponse(withTaintedUpstreamX509Roots), + expectCode: codes.OK, + expectMessage: "", + expectStreamUpdates: false, + expectUpstreamX509RootsResponse: taintedUpstreamX509Roots, + }, { test: "second plugin response is bad (contains X.509 CA)", builder: builder. 
@@ -182,8 +211,12 @@ func TestV1MintX509CA(t *testing.T) { } require.NotNil(t, upstreamX509RootsStream, "stream should have been returned") defer upstreamX509RootsStream.Close() + expectUpstreamX509Roots := expectedUpstreamX509Roots + if tt.expectUpstreamX509RootsResponse != nil { + expectUpstreamX509Roots = tt.expectUpstreamX509RootsResponse + } assert.Equal(t, expectedX509CAChain, x509CA) - assert.Equal(t, expectedUpstreamX509Roots, upstreamX509Roots) + assert.Equal(t, expectUpstreamX509Roots, upstreamX509Roots) switch { case !tt.expectStreamUpdates: @@ -193,7 +226,11 @@ func TestV1MintX509CA(t *testing.T) { case tt.expectStreamCode == codes.OK: upstreamX509Roots, err = upstreamX509RootsStream.RecvUpstreamX509Authorities() assert.NoError(t, err, "stream should have returned update") - assert.Equal(t, expectedUpstreamX509Roots, upstreamX509Roots) + expected := expectUpstreamX509Roots + if tt.expectUpstreamX509RootsResponse != nil { + expected = tt.expectUpstreamX509RootsResponse + } + assert.Equal(t, expected, upstreamX509Roots) default: upstreamX509Roots, err = upstreamX509RootsStream.RecvUpstreamX509Authorities() spiretest.RequireGRPCStatusHasPrefix(t, err, tt.expectStreamCode, tt.expectStreamMessage) diff --git a/pkg/server/plugin/upstreamauthority/vault/vault.go b/pkg/server/plugin/upstreamauthority/vault/vault.go index 011df843313..05bb09d0574 100644 --- a/pkg/server/plugin/upstreamauthority/vault/vault.go +++ b/pkg/server/plugin/upstreamauthority/vault/vault.go @@ -221,7 +221,7 @@ func (p *Plugin) MintX509CAAndSubscribe(req *upstreamauthorityv1.MintX509CAReque return status.Errorf(codes.Internal, "failed to parse Root CA certificate: %v", err) } - upstreamX509Roots, err := x509certificate.ToPluginProtos([]*x509.Certificate{upstreamRoot}) + upstreamX509Roots, err := x509certificate.ToPluginFromCertificates([]*x509.Certificate{upstreamRoot}) if err != nil { return status.Errorf(codes.Internal, "unable to form response upstream X.509 roots: %v", err) } @@ 
-249,7 +249,7 @@ func (p *Plugin) MintX509CAAndSubscribe(req *upstreamauthorityv1.MintX509CAReque certChain = append(certChain, b) } - x509CAChain, err := x509certificate.ToPluginProtos(certChain) + x509CAChain, err := x509certificate.ToPluginFromCertificates(certChain) if err != nil { return status.Errorf(codes.Internal, "unable to form response X.509 CA chain: %v", err) } diff --git a/pkg/server/plugin/upstreamauthority/vault/vault_client_test.go b/pkg/server/plugin/upstreamauthority/vault/vault_client_test.go index d3f158a04ee..941cabca9d0 100644 --- a/pkg/server/plugin/upstreamauthority/vault/vault_client_test.go +++ b/pkg/server/plugin/upstreamauthority/vault/vault_client_test.go @@ -521,7 +521,7 @@ func TestConfigureTLSWithTokenAuth(t *testing.T) { testPool, err := testRootCAs() require.NoError(t, err) - require.Equal(t, testPool.Subjects(), tcc.RootCAs.Subjects()) //nolint:staticcheck // these pools are not system pools so the use of Subjects() is ok for now + require.Equal(t, testPool.Subjects(), tcc.RootCAs.Subjects()) } func TestConfigureTLSWithAppRoleAuth(t *testing.T) { @@ -543,7 +543,7 @@ func TestConfigureTLSWithAppRoleAuth(t *testing.T) { testPool, err := testRootCAs() require.NoError(t, err) - require.Equal(t, testPool.Subjects(), tcc.RootCAs.Subjects()) //nolint:staticcheck // these pools are not system pools so the use of Subjects() is ok for now + require.Equal(t, testPool.Subjects(), tcc.RootCAs.Subjects()) } func TestConfigureTLSInvalidCACert(t *testing.T) { diff --git a/pkg/server/plugin/upstreamauthority/vault/vault_test.go b/pkg/server/plugin/upstreamauthority/vault/vault_test.go index c4db2833574..f1913cd5460 100644 --- a/pkg/server/plugin/upstreamauthority/vault/vault_test.go +++ b/pkg/server/plugin/upstreamauthority/vault/vault_test.go @@ -16,6 +16,7 @@ import ( "github.com/spiffe/go-spiffe/v2/spiffeid" "github.com/spiffe/spire/pkg/common/catalog" + "github.com/spiffe/spire/pkg/common/coretypes/x509certificate" 
"github.com/spiffe/spire/pkg/common/pemutil" "github.com/spiffe/spire/pkg/server/plugin/upstreamauthority" "github.com/spiffe/spire/proto/spire/common" @@ -713,7 +714,7 @@ func TestMintX509CA(t *testing.T) { x509CAIDs := certChainURIs(x509CA) require.Equal(t, tt.expectX509CA, x509CAIDs) - x509AuthoritiesIDs := certChainURIs(x509Authorities) + x509AuthoritiesIDs := authChainURIs(x509Authorities) require.Equal(t, tt.expectedX509Authorities, x509AuthoritiesIDs) if p.cc.clientParams.Namespace != "" { @@ -842,6 +843,14 @@ func certChainURIs(chain []*x509.Certificate) []string { return uris } +func authChainURIs(chain []*x509certificate.X509Authority) []string { + var uris []string + for _, authority := range chain { + uris = append(uris, certURI(authority.Certificate)) + } + return uris +} + func certURI(cert *x509.Certificate) string { if len(cert.URIs) == 1 { return cert.URIs[0].String() diff --git a/pkg/server/server.go b/pkg/server/server.go index b24c86e7543..f0fcabfe17b 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -379,7 +379,7 @@ func (s *Server) newSVIDRotator(ctx context.Context, serverCA ca.ServerCA, metri return svidRotator, nil } -func (s *Server) newEndpointsServer(ctx context.Context, catalog catalog.Catalog, svidObserver svid.Observer, serverCA ca.ServerCA, metrics telemetry.Metrics, jwtKeyPublisher manager.JwtKeyPublisher, authPolicyEngine *authpolicy.Engine, bundleManager *bundle_client.Manager) (endpoints.Server, error) { +func (s *Server) newEndpointsServer(ctx context.Context, catalog catalog.Catalog, svidObserver svid.Observer, serverCA ca.ServerCA, metrics telemetry.Metrics, authorityManager manager.AuthorityManager, authPolicyEngine *authpolicy.Engine, bundleManager *bundle_client.Manager) (endpoints.Server, error) { config := endpoints.Config{ TCPAddr: s.config.BindAddress, LocalAddr: s.config.BindLocalAddress, @@ -390,7 +390,7 @@ func (s *Server) newEndpointsServer(ctx context.Context, catalog catalog.Catalog Log: 
s.config.Log.WithField(telemetry.SubsystemName, telemetry.Endpoints), RootLog: s.config.Log, Metrics: metrics, - JWTKeyPublisher: jwtKeyPublisher, + AuthorityManager: authorityManager, RateLimit: s.config.RateLimit, Uptime: uptime.Uptime, Clock: clock.New(), diff --git a/pkg/server/svid/rotator.go b/pkg/server/svid/rotator.go index ca23b1ea492..fc62dbaff80 100644 --- a/pkg/server/svid/rotator.go +++ b/pkg/server/svid/rotator.go @@ -13,10 +13,16 @@ import ( "github.com/spiffe/spire/pkg/server/ca" ) +var ( + defaultBundleVerificationTicker = 30 * time.Second +) + type Rotator struct { c *RotatorConfig - state observer.Property + state observer.Property + isSVIDTainted bool + taintedReceived chan bool } // State is the current SVID and key @@ -42,17 +48,31 @@ func (r *Rotator) Interval() time.Duration { return r.c.Interval } +func (r *Rotator) triggerTaintedReceived(tainted bool) { + r.taintedReceived <- tainted +} + // Run starts a ticker which monitors the server SVID // for expiration and rotates the SVID as necessary. 
func (r *Rotator) Run(ctx context.Context) error { t := r.c.Clock.Ticker(r.c.Interval) defer t.Stop() + bundeVerificationTicker := r.c.Clock.Ticker(defaultBundleVerificationTicker) + defer bundeVerificationTicker.Stop() + for { select { case <-ctx.Done(): r.c.Log.Debug("Stopping SVID rotator") return nil + case taintedAuthorities := <-r.c.ServerCA.TaintedAuthorities(): + isTainted := r.isX509AuthorityTainted(taintedAuthorities) + if isTainted { + r.triggerTaintedReceived(true) + r.c.Log.Info("Server SVID signed using a tainted authority, forcing rotation of the Server SVID") + r.isSVIDTainted = true + } case <-t.C: if r.shouldRotate() { if err := r.rotateSVID(ctx); err != nil { @@ -72,7 +92,31 @@ func (r *Rotator) shouldRotate() bool { return true } - return r.c.Clock.Now().After(certHalfLife(s.SVID[0])) + return r.c.Clock.Now().After(certHalfLife(s.SVID[0])) || + r.isSVIDTainted +} + +func (r *Rotator) isX509AuthorityTainted(taintedAuthorities []*x509.Certificate) bool { + svid := r.State().SVID + + rootPool := x509.NewCertPool() + for _, taintedKey := range taintedAuthorities { + rootPool.AddCert(taintedKey) + } + + intermediatePool := x509.NewCertPool() + for _, intermediateCA := range svid[1:] { + intermediatePool.AddCert(intermediateCA) + } + + // Verify certificate chain, using tainted authority as root + _, err := svid[0].Verify(x509.VerifyOptions{ + Intermediates: intermediatePool, + Roots: rootPool, + CurrentTime: r.c.Clock.Now(), + }) + + return err == nil } // rotateSVID cuts a new server SVID from the CA plugin and installs @@ -103,6 +147,9 @@ func (r *Rotator) rotateSVID(ctx context.Context) (err error) { SVID: svid, Key: signer, }) + // New SVID must not be tainted. Rotator is notified about tainted + // authorities only when the intermediate is already rotated. 
+ r.isSVIDTainted = false return nil } diff --git a/pkg/server/svid/rotator_test.go b/pkg/server/svid/rotator_test.go index 1536362b690..6a321491c1b 100644 --- a/pkg/server/svid/rotator_test.go +++ b/pkg/server/svid/rotator_test.go @@ -14,10 +14,16 @@ import ( "github.com/sirupsen/logrus/hooks/test" "github.com/spiffe/go-spiffe/v2/spiffeid" "github.com/spiffe/spire/pkg/common/telemetry" + "github.com/spiffe/spire/pkg/common/x509util" + "github.com/spiffe/spire/pkg/server/ca" + "github.com/spiffe/spire/pkg/server/credtemplate" "github.com/spiffe/spire/pkg/server/plugin/keymanager" "github.com/spiffe/spire/test/clock" "github.com/spiffe/spire/test/fakes/fakeserverca" "github.com/spiffe/spire/test/spiretest" + "github.com/spiffe/spire/test/testkey" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" ) @@ -25,6 +31,10 @@ const ( testTTL = time.Minute * 10 ) +var ( + trustDomain = spiffeid.RequireTrustDomainFromString("example.org") +) + func TestRotator(t *testing.T) { suite.Run(t, new(RotatorTestSuite)) } @@ -39,8 +49,6 @@ type RotatorTestSuite struct { } func (s *RotatorTestSuite) SetupTest() { - trustDomain := spiffeid.RequireTrustDomainFromString("example.org") - s.clock = clock.NewMock(s.T()) s.serverCA = fakeserverca.New(s.T(), trustDomain, &fakeserverca.Options{ Clock: s.clock, @@ -105,6 +113,79 @@ func (s *RotatorTestSuite) TestRotationSucceeds() { s.Require().NoError(<-errCh) } +func (s *RotatorTestSuite) TestForceRotation() { + stream := s.r.Subscribe() + t := s.T() + + var wg sync.WaitGroup + defer wg.Wait() + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + err := s.r.Initialize(ctx) + s.Require().NoError(err) + + originalCA := s.serverCA.Bundle() + + // New CA + signer := testkey.MustEC256() + template, err := s.serverCA.CredBuilder().BuildSelfSignedX509CATemplate(context.Background(), credtemplate.SelfSignedX509CAParams{ + PublicKey: signer.Public(), 
+ }) + require.NoError(t, err) + + newCA, err := x509util.CreateCertificate(template, template, signer.Public(), signer) + require.NoError(t, err) + + newCASubjectID := newCA.SubjectKeyId + + // The call to initialize should do the first rotation + cert := s.requireNewCert(stream, big.NewInt(-1)) + + // Run should rotate whenever the certificate is within half of its + // remaining lifetime. + wg.Add(1) + errCh := make(chan error, 1) + go func() { + defer wg.Done() + errCh <- s.r.Run(ctx) + }() + + // Change X509CA + s.serverCA.SetX509CA(&ca.X509CA{ + Signer: signer, + Certificate: newCA, + }) + + s.clock.WaitForTicker(time.Minute, "waiting for the Run() ticker") + + s.r.taintedReceived = make(chan bool, 1) + // Notify that old authority is tainted + s.serverCA.NotifyTaintedX509Authorities(originalCA) + + select { + case received := <-s.r.taintedReceived: + assert.True(t, received) + case <-ctx.Done(): + s.Fail("no notification received") + } + + // Advance interval, so new SVID is signed + s.clock.Add(DefaultRotatorInterval) + cert = s.requireNewCert(stream, cert.SerialNumber) + require.Equal(t, newCASubjectID, cert.AuthorityKeyId) + + // Notify again, must not mark as tainted + s.serverCA.NotifyTaintedX509Authorities(originalCA) + s.clock.Add(DefaultRotatorInterval) + s.requireStateChangeTimeout(stream) + require.False(t, s.r.isSVIDTainted) + + cancel() + s.Require().NoError(<-errCh) +} + func (s *RotatorTestSuite) TestRotationFails() { var wg sync.WaitGroup defer wg.Wait() diff --git a/proto/private/server/journal/journal.pb.go b/proto/private/server/journal/journal.pb.go index 85015d7191d..e41d5f99732 100644 --- a/proto/private/server/journal/journal.pb.go +++ b/proto/private/server/journal/journal.pb.go @@ -96,6 +96,8 @@ type X509CAEntry struct { AuthorityId string `protobuf:"bytes,6,opt,name=authority_id,json=authorityId,proto3" json:"authority_id,omitempty"` // When the CA expires (unix epoch in seconds) NotAfter int64 
`protobuf:"varint,7,opt,name=not_after,json=notAfter,proto3" json:"not_after,omitempty"` + // The X.509 Authority Subject Key Identifier (SKID) + UpstreamAuthorityId string `protobuf:"bytes,8,opt,name=upstream_authority_id,json=upstreamAuthorityId,proto3" json:"upstream_authority_id,omitempty"` } func (x *X509CAEntry) Reset() { @@ -179,6 +181,13 @@ func (x *X509CAEntry) GetNotAfter() int64 { return 0 } +func (x *X509CAEntry) GetUpstreamAuthorityId() string { + if x != nil { + return x.UpstreamAuthorityId + } + return "" +} + type JWTKeyEntry struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -341,7 +350,7 @@ var File_private_server_journal_journal_proto protoreflect.FileDescriptor var file_private_server_journal_journal_proto_rawDesc = []byte{ 0x0a, 0x24, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xed, 0x01, 0x0a, 0x0b, 0x58, 0x35, 0x30, 0x39, 0x43, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa1, 0x02, 0x0a, 0x0b, 0x58, 0x35, 0x30, 0x39, 0x43, 0x41, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x6c, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6c, 0x6f, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, @@ -356,34 +365,37 @@ var file_private_server_journal_journal_proto_rawDesc = []byte{ 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6e, 0x6f, - 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x22, 0xd5, 0x01, 0x0a, 0x0b, 0x4a, 0x57, 0x54, 0x4b, 0x65, - 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x6c, 
0x6f, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6c, 0x6f, 0x74, 0x49, 0x64, 0x12, - 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x6e, 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x08, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x70, - 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x1f, 0x0a, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x07, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x61, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x59, - 0x0a, 0x07, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x07, 0x78, 0x35, 0x30, - 0x39, 0x43, 0x41, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x58, 0x35, 0x30, - 0x39, 0x43, 0x41, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x78, 0x35, 0x30, 0x39, 0x43, 0x41, - 0x73, 0x12, 0x26, 0x0a, 0x07, 0x6a, 0x77, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x4a, 0x57, 0x54, 0x4b, 0x65, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x07, 0x6a, 0x77, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x2a, 0x38, 0x0a, 0x06, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, - 0x12, 0x0c, 0x0a, 0x08, 0x50, 0x52, 0x45, 0x50, 0x41, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0a, - 0x0a, 0x06, 
0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x4f, 0x4c, - 0x44, 0x10, 0x04, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x2f, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2f, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x15, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x41, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0xd5, 0x01, 0x0a, 0x0b, 0x4a, + 0x57, 0x54, 0x4b, 0x65, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x6c, + 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6c, 0x6f, + 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x5f, 0x61, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x41, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x69, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x1f, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x07, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x21, 0x0a, 0x0c, 0x61, 0x75, 
0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, + 0x49, 0x64, 0x22, 0x59, 0x0a, 0x07, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x26, 0x0a, + 0x07, 0x78, 0x35, 0x30, 0x39, 0x43, 0x41, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, + 0x2e, 0x58, 0x35, 0x30, 0x39, 0x43, 0x41, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x78, 0x35, + 0x30, 0x39, 0x43, 0x41, 0x73, 0x12, 0x26, 0x0a, 0x07, 0x6a, 0x77, 0x74, 0x4b, 0x65, 0x79, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x4a, 0x57, 0x54, 0x4b, 0x65, 0x79, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6a, 0x77, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x2a, 0x38, 0x0a, + 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x50, 0x52, 0x45, 0x50, 0x41, 0x52, 0x45, 0x44, + 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x03, 0x12, 0x07, + 0x0a, 0x03, 0x4f, 0x4c, 0x44, 0x10, 0x04, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, + 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/proto/private/server/journal/journal.proto b/proto/private/server/journal/journal.proto index 798339bcb24..41530cb9354 100644 --- a/proto/private/server/journal/journal.proto +++ b/proto/private/server/journal/journal.proto @@ -22,6 +22,9 @@ message X509CAEntry { // When the CA expires (unix epoch in seconds) int64 not_after = 7; + + // The X.509 Authority Subject Key Identifier (SKID) + string upstream_authority_id = 8; } message JWTKeyEntry { diff --git a/proto/spire/common/common.pb.go 
b/proto/spire/common/common.pb.go index ec218ed9ccc..78f9bc3d862 100644 --- a/proto/spire/common/common.pb.go +++ b/proto/spire/common/common.pb.go @@ -719,7 +719,8 @@ type Certificate struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - DerBytes []byte `protobuf:"bytes,1,opt,name=der_bytes,json=derBytes,proto3" json:"der_bytes,omitempty"` + DerBytes []byte `protobuf:"bytes,1,opt,name=der_bytes,json=derBytes,proto3" json:"der_bytes,omitempty"` + TaintedKey bool `protobuf:"varint,2,opt,name=tainted_key,json=taintedKey,proto3" json:"tainted_key,omitempty"` } func (x *Certificate) Reset() { @@ -761,6 +762,13 @@ func (x *Certificate) GetDerBytes() []byte { return nil } +func (x *Certificate) GetTaintedKey() bool { + if x != nil { + return x.TaintedKey + } + return false +} + // * PublicKey represents a PKIX encoded public key type PublicKey struct { state protoimpl.MessageState @@ -854,8 +862,6 @@ type Bundle struct { // * sequence number is a monotonically increasing number that is // incremented every time the bundle is updated SequenceNumber uint64 `protobuf:"varint,5,opt,name=sequence_number,json=sequenceNumber,proto3" json:"sequence_number,omitempty"` - // * list of X.509 tainted keys - X509TaintedKeys []*X509TaintedKey `protobuf:"bytes,6,rep,name=x509_tainted_keys,json=x509TaintedKeys,proto3" json:"x509_tainted_keys,omitempty"` } func (x *Bundle) Reset() { @@ -925,61 +931,6 @@ func (x *Bundle) GetSequenceNumber() uint64 { return 0 } -func (x *Bundle) GetX509TaintedKeys() []*X509TaintedKey { - if x != nil { - return x.X509TaintedKeys - } - return nil -} - -type X509TaintedKey struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // * tainted public key - PublicKey []byte `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` -} - -func (x *X509TaintedKey) Reset() { - *x = X509TaintedKey{} - if protoimpl.UnsafeEnabled { - mi := 
&file_spire_common_common_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *X509TaintedKey) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*X509TaintedKey) ProtoMessage() {} - -func (x *X509TaintedKey) ProtoReflect() protoreflect.Message { - mi := &file_spire_common_common_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use X509TaintedKey.ProtoReflect.Descriptor instead. -func (*X509TaintedKey) Descriptor() ([]byte, []int) { - return file_spire_common_common_proto_rawDescGZIP(), []int{11} -} - -func (x *X509TaintedKey) GetPublicKey() []byte { - if x != nil { - return x.PublicKey - } - return nil -} - type BundleMask struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -995,7 +946,7 @@ type BundleMask struct { func (x *BundleMask) Reset() { *x = BundleMask{} if protoimpl.UnsafeEnabled { - mi := &file_spire_common_common_proto_msgTypes[12] + mi := &file_spire_common_common_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1008,7 +959,7 @@ func (x *BundleMask) String() string { func (*BundleMask) ProtoMessage() {} func (x *BundleMask) ProtoReflect() protoreflect.Message { - mi := &file_spire_common_common_proto_msgTypes[12] + mi := &file_spire_common_common_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1021,7 +972,7 @@ func (x *BundleMask) ProtoReflect() protoreflect.Message { // Deprecated: Use BundleMask.ProtoReflect.Descriptor instead. 
func (*BundleMask) Descriptor() ([]byte, []int) { - return file_spire_common_common_proto_rawDescGZIP(), []int{12} + return file_spire_common_common_proto_rawDescGZIP(), []int{11} } func (x *BundleMask) GetRootCas() bool { @@ -1075,7 +1026,7 @@ type AttestedNodeMask struct { func (x *AttestedNodeMask) Reset() { *x = AttestedNodeMask{} if protoimpl.UnsafeEnabled { - mi := &file_spire_common_common_proto_msgTypes[13] + mi := &file_spire_common_common_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1088,7 +1039,7 @@ func (x *AttestedNodeMask) String() string { func (*AttestedNodeMask) ProtoMessage() {} func (x *AttestedNodeMask) ProtoReflect() protoreflect.Message { - mi := &file_spire_common_common_proto_msgTypes[13] + mi := &file_spire_common_common_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1101,7 +1052,7 @@ func (x *AttestedNodeMask) ProtoReflect() protoreflect.Message { // Deprecated: Use AttestedNodeMask.ProtoReflect.Descriptor instead. 
func (*AttestedNodeMask) Descriptor() ([]byte, []int) { - return file_spire_common_common_proto_rawDescGZIP(), []int{13} + return file_spire_common_common_proto_rawDescGZIP(), []int{12} } func (x *AttestedNodeMask) GetAttestationDataType() bool { @@ -1249,75 +1200,69 @@ var file_spire_common_common_proto_rawDesc = []byte{ 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, - 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0x2a, 0x0a, 0x0b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0x4b, 0x0a, 0x0b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x64, 0x65, 0x72, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x22, 0x7a, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x1d, - 0x0a, 0x0a, 0x70, 0x6b, 0x69, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x09, 0x70, 0x6b, 0x69, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x69, 0x64, 0x12, - 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, - 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0a, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x22, 0xbf, 0x02, - 0x0a, 0x06, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x72, 0x75, 0x73, - 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 
0x74, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x49, 0x64, - 0x12, 0x34, 0x0a, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x63, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x07, 0x72, - 0x6f, 0x6f, 0x74, 0x43, 0x61, 0x73, 0x12, 0x41, 0x0a, 0x10, 0x6a, 0x77, 0x74, 0x5f, 0x73, 0x69, - 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x0e, 0x6a, 0x77, 0x74, 0x53, 0x69, - 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x66, - 0x72, 0x65, 0x73, 0x68, 0x5f, 0x68, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0b, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x48, 0x69, 0x6e, 0x74, 0x12, 0x27, 0x0a, 0x0f, - 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x4e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x11, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x74, 0x61, - 0x69, 0x6e, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x58, 0x35, 0x30, 0x39, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x0f, - 0x78, 0x35, 0x30, 0x39, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, - 0x2f, 0x0a, 0x0e, 0x58, 0x35, 0x30, 0x39, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x4b, 0x65, - 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x75, 
0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, - 0x22, 0xc9, 0x01, 0x0a, 0x0a, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, - 0x19, 0x0a, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x07, 0x72, 0x6f, 0x6f, 0x74, 0x43, 0x61, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x6a, 0x77, - 0x74, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x6a, 0x77, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, - 0x4b, 0x65, 0x79, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, - 0x68, 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x72, 0x65, 0x66, 0x72, - 0x65, 0x73, 0x68, 0x48, 0x69, 0x6e, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x71, 0x75, 0x65, - 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0e, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x12, 0x2a, 0x0a, 0x11, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, - 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x78, 0x35, 0x30, - 0x39, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x9f, 0x02, 0x0a, - 0x10, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, 0x64, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x73, - 0x6b, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x13, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, - 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x65, - 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 
0x6d, - 0x62, 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0e, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, - 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x63, 0x65, 0x72, - 0x74, 0x4e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x33, 0x0a, 0x16, 0x6e, 0x65, 0x77, - 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x6e, 0x65, 0x77, 0x43, 0x65, - 0x72, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x2b, - 0x0a, 0x12, 0x6e, 0x65, 0x77, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x61, - 0x66, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6e, 0x65, 0x77, 0x43, - 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x63, - 0x61, 0x6e, 0x5f, 0x72, 0x65, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0b, 0x63, 0x61, 0x6e, 0x52, 0x65, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x42, 0x2c, - 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, - 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x4b, + 0x65, 0x79, 0x22, 0x7a, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, + 0x1d, 0x0a, 0x0a, 0x70, 0x6b, 0x69, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x6b, 0x69, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x69, 0x64, + 0x12, 
0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1f, 0x0a, + 0x0b, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0a, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x22, 0xf5, + 0x01, 0x0a, 0x06, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x72, 0x75, + 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x49, + 0x64, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x63, 0x61, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x07, + 0x72, 0x6f, 0x6f, 0x74, 0x43, 0x61, 0x73, 0x12, 0x41, 0x0a, 0x10, 0x6a, 0x77, 0x74, 0x5f, 0x73, + 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x0e, 0x6a, 0x77, 0x74, 0x53, + 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, + 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x68, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0b, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x48, 0x69, 0x6e, 0x74, 0x12, 0x27, 0x0a, + 0x0f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xc9, 0x01, 0x0a, 0x0a, 0x42, 0x75, 0x6e, 0x64, 0x6c, + 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x19, 
0x0a, 0x08, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x63, 0x61, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x72, 0x6f, 0x6f, 0x74, 0x43, 0x61, 0x73, + 0x12, 0x28, 0x0a, 0x10, 0x6a, 0x77, 0x74, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x5f, + 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x6a, 0x77, 0x74, 0x53, + 0x69, 0x67, 0x6e, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, + 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x68, 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x48, 0x69, 0x6e, 0x74, 0x12, 0x27, 0x0a, + 0x0f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x11, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x74, + 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x4b, 0x65, + 0x79, 0x73, 0x22, 0x9f, 0x02, 0x0a, 0x10, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, 0x64, 0x4e, + 0x6f, 0x64, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x74, 0x74, 0x65, 0x73, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x63, + 0x65, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x53, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0e, 0x63, 0x65, 0x72, + 0x74, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, 0x03, 
0x20, 0x01, 0x28, + 0x08, 0x52, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, + 0x33, 0x0a, 0x16, 0x6e, 0x65, 0x77, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x13, 0x6e, 0x65, 0x77, 0x43, 0x65, 0x72, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x12, 0x6e, 0x65, 0x77, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0f, 0x6e, 0x65, 0x77, 0x43, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x41, 0x66, 0x74, 0x65, + 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x61, 0x6e, 0x5f, 0x72, 0x65, 0x61, 0x74, 0x74, 0x65, 0x73, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x63, 0x61, 0x6e, 0x52, 0x65, 0x61, 0x74, + 0x74, 0x65, 0x73, 0x74, 0x42, 0x2c, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1332,7 +1277,7 @@ func file_spire_common_common_proto_rawDescGZIP() []byte { return file_spire_common_common_proto_rawDescData } -var file_spire_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 14) +var file_spire_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 13) var file_spire_common_common_proto_goTypes = []any{ (*Empty)(nil), // 0: spire.common.Empty (*AttestationData)(nil), // 1: spire.common.AttestationData @@ -1345,23 +1290,21 @@ var file_spire_common_common_proto_goTypes = []any{ (*Certificate)(nil), // 8: spire.common.Certificate (*PublicKey)(nil), // 9: spire.common.PublicKey (*Bundle)(nil), // 10: spire.common.Bundle - (*X509TaintedKey)(nil), // 11: spire.common.X509TaintedKey - 
(*BundleMask)(nil), // 12: spire.common.BundleMask - (*AttestedNodeMask)(nil), // 13: spire.common.AttestedNodeMask + (*BundleMask)(nil), // 11: spire.common.BundleMask + (*AttestedNodeMask)(nil), // 12: spire.common.AttestedNodeMask } var file_spire_common_common_proto_depIdxs = []int32{ - 2, // 0: spire.common.Selectors.entries:type_name -> spire.common.Selector - 2, // 1: spire.common.AttestedNode.selectors:type_name -> spire.common.Selector - 2, // 2: spire.common.RegistrationEntry.selectors:type_name -> spire.common.Selector - 5, // 3: spire.common.RegistrationEntries.entries:type_name -> spire.common.RegistrationEntry - 8, // 4: spire.common.Bundle.root_cas:type_name -> spire.common.Certificate - 9, // 5: spire.common.Bundle.jwt_signing_keys:type_name -> spire.common.PublicKey - 11, // 6: spire.common.Bundle.x509_tainted_keys:type_name -> spire.common.X509TaintedKey - 7, // [7:7] is the sub-list for method output_type - 7, // [7:7] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name + 2, // 0: spire.common.Selectors.entries:type_name -> spire.common.Selector + 2, // 1: spire.common.AttestedNode.selectors:type_name -> spire.common.Selector + 2, // 2: spire.common.RegistrationEntry.selectors:type_name -> spire.common.Selector + 5, // 3: spire.common.RegistrationEntries.entries:type_name -> spire.common.RegistrationEntry + 8, // 4: spire.common.Bundle.root_cas:type_name -> spire.common.Certificate + 9, // 5: spire.common.Bundle.jwt_signing_keys:type_name -> spire.common.PublicKey + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name } func init() { file_spire_common_common_proto_init() } @@ -1503,18 +1446,6 @@ func 
file_spire_common_common_proto_init() { } } file_spire_common_common_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*X509TaintedKey); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_spire_common_common_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*BundleMask); i { case 0: return &v.state @@ -1526,7 +1457,7 @@ func file_spire_common_common_proto_init() { return nil } } - file_spire_common_common_proto_msgTypes[13].Exporter = func(v any, i int) any { + file_spire_common_common_proto_msgTypes[12].Exporter = func(v any, i int) any { switch v := v.(*AttestedNodeMask); i { case 0: return &v.state @@ -1545,7 +1476,7 @@ func file_spire_common_common_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_spire_common_common_proto_rawDesc, NumEnums: 0, - NumMessages: 14, + NumMessages: 13, NumExtensions: 0, NumServices: 0, }, diff --git a/proto/spire/common/common.proto b/proto/spire/common/common.proto index b22f752d1cd..cd1ed63c616 100644 --- a/proto/spire/common/common.proto +++ b/proto/spire/common/common.proto @@ -125,6 +125,7 @@ message RegistrationEntries { /** Certificate represents a ASN.1/DER encoded X509 certificate */ message Certificate { bytes der_bytes = 1; + bool tainted_key = 2; } /** PublicKey represents a PKIX encoded public key */ @@ -159,14 +160,6 @@ message Bundle { /** sequence number is a monotonically increasing number that is * incremented every time the bundle is updated */ uint64 sequence_number = 5; - - /** list of X.509 tainted keys */ - repeated X509TaintedKey x509_tainted_keys = 6; -} - -message X509TaintedKey { - /** tainted public key*/ - bytes public_key = 1; } message BundleMask { diff --git a/support/oidc-discovery-provider/main.go b/support/oidc-discovery-provider/main.go index 2b6dd69e453..960dc50138f 100644 --- a/support/oidc-discovery-provider/main.go +++ 
b/support/oidc-discovery-provider/main.go @@ -157,7 +157,7 @@ func newSource(log logrus.FieldLogger, config *Config) (JWKSSource, error) { case config.WorkloadAPI != nil: workloadAPIAddr, err := config.getWorkloadAPIAddr() if err != nil { - return nil, errs.New(err.Error()) + return nil, errs.Wrap(err) } return NewWorkloadAPISource(WorkloadAPISourceConfig{ Log: log, diff --git a/test/fakes/fakedatastore/fakedatastore.go b/test/fakes/fakedatastore/fakedatastore.go index 3e00637dbd4..283a2b61eea 100644 --- a/test/fakes/fakedatastore/fakedatastore.go +++ b/test/fakes/fakedatastore/fakedatastore.go @@ -2,7 +2,6 @@ package fakedatastore import ( "context" - "crypto" "fmt" "sort" "sync/atomic" @@ -198,18 +197,18 @@ func (s *DataStore) FetchAttestedNodeEvent(ctx context.Context, eventID uint) (* return s.ds.FetchAttestedNodeEvent(ctx, eventID) } -func (s *DataStore) TaintX509CA(ctx context.Context, trustDomainID string, publicKeyToTaint crypto.PublicKey) error { +func (s *DataStore) TaintX509CA(ctx context.Context, trustDomainID string, subjectKeyIDToTaint string) error { if err := s.getNextError(); err != nil { return err } - return s.ds.TaintX509CA(ctx, trustDomainID, publicKeyToTaint) + return s.ds.TaintX509CA(ctx, trustDomainID, subjectKeyIDToTaint) } -func (s *DataStore) RevokeX509CA(ctx context.Context, trustDomainID string, publicKeyToRevoke crypto.PublicKey) error { +func (s *DataStore) RevokeX509CA(ctx context.Context, trustDomainID string, subjectKeyIDToRevoke string) error { if err := s.getNextError(); err != nil { return err } - return s.ds.RevokeX509CA(ctx, trustDomainID, publicKeyToRevoke) + return s.ds.RevokeX509CA(ctx, trustDomainID, subjectKeyIDToRevoke) } func (s *DataStore) TaintJWTKey(ctx context.Context, trustDomainID string, authorityID string) (*common.PublicKey, error) { diff --git a/test/fakes/fakeserverca/serverca.go b/test/fakes/fakeserverca/serverca.go index 1a5d09341ba..5a85db8df36 100644 --- a/test/fakes/fakeserverca/serverca.go +++ 
b/test/fakes/fakeserverca/serverca.go @@ -128,6 +128,10 @@ func (c *CA) SetJWTKey(jwtKey *ca.JWTKey) { c.ca.SetJWTKey(jwtKey) } +func (c *CA) NotifyTaintedX509Authorities(taintedAuthorities []*x509.Certificate) { + c.ca.NotifyTaintedX509Authorities(taintedAuthorities) +} + func (c *CA) SignDownstreamX509CA(ctx context.Context, params ca.DownstreamX509CAParams) ([]*x509.Certificate, error) { if c.err != nil { return nil, c.err @@ -163,6 +167,10 @@ func (c *CA) SignWorkloadJWTSVID(ctx context.Context, params ca.WorkloadJWTSVIDP return c.ca.SignWorkloadJWTSVID(ctx, params) } +func (c *CA) TaintedAuthorities() <-chan []*x509.Certificate { + return c.ca.TaintedAuthorities() +} + func (c *CA) SetError(err error) { c.err = err } diff --git a/test/fakes/fakeupstreamauthority/upstreamauthority.go b/test/fakes/fakeupstreamauthority/upstreamauthority.go index d18519ddd5f..ae7a2a09675 100644 --- a/test/fakes/fakeupstreamauthority/upstreamauthority.go +++ b/test/fakes/fakeupstreamauthority/upstreamauthority.go @@ -6,6 +6,7 @@ import ( "crypto/rand" "crypto/x509" "crypto/x509/pkix" + "errors" "math/big" "sync" "testing" @@ -18,6 +19,7 @@ import ( "github.com/spiffe/spire/pkg/common/x509svid" "github.com/spiffe/spire/pkg/common/x509util" "github.com/spiffe/spire/proto/spire/common" + "github.com/spiffe/spire/test/clock" "github.com/spiffe/spire/test/testkey" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" @@ -30,6 +32,7 @@ var ( ) type Config struct { + Clock clock.Clock TrustDomain spiffeid.TrustDomain UseIntermediate bool DisallowPublishJWTKey bool @@ -47,9 +50,9 @@ type UpstreamAuthority struct { x509CAMtx sync.RWMutex x509CA *x509svid.UpstreamCA x509CASN int64 - x509Root *x509.Certificate + x509Root *x509certificate.X509Authority x509Intermediate *x509.Certificate - x509Roots []*x509.Certificate + x509Roots []*x509certificate.X509Authority jwtKeysMtx sync.RWMutex jwtKeys []*common.PublicKey @@ -60,6 +63,9 @@ type UpstreamAuthority struct { } func New(t 
*testing.T, config Config) *UpstreamAuthority { + if config.Clock == nil { + config.Clock = clock.NewMock(t) + } ua := &UpstreamAuthority{ t: t, config: config, @@ -82,7 +88,7 @@ func (ua *UpstreamAuthority) MintX509CAAndSubscribe(request *upstreamauthorityv1 } if err := ua.sendMintX509CAResponse(stream, &upstreamauthorityv1.MintX509CAResponse{ - X509CaChain: x509certificate.RequireToPluginProtos(x509CAChain), + X509CaChain: x509certificate.RequireToPluginFromCertificates(x509CAChain), UpstreamX509Roots: x509certificate.RequireToPluginProtos(ua.X509Roots()), }); err != nil { return err @@ -140,7 +146,7 @@ func (ua *UpstreamAuthority) RotateX509CA() { caKey = x509IntKey } else { ua.createRootCertificate() - caCert = ua.x509Root + caCert = ua.x509Root.Certificate caKey = x509RootKey } @@ -152,13 +158,30 @@ func (ua *UpstreamAuthority) RotateX509CA() { ua.TriggerX509RootsChanged() } -func (ua *UpstreamAuthority) X509Root() *x509.Certificate { +func (ua *UpstreamAuthority) TaintAuthority(index int) error { + ua.x509CAMtx.Lock() + defer ua.x509CAMtx.Unlock() + + rootsLen := len(ua.x509Roots) + if rootsLen == 0 { + return errors.New("no root to taint") + } + if index >= rootsLen { + return errors.New("out of range") + } + + ua.x509Roots[index].Tainted = true + ua.TriggerX509RootsChanged() + return nil +} + +func (ua *UpstreamAuthority) X509Root() *x509certificate.X509Authority { ua.x509CAMtx.RLock() defer ua.x509CAMtx.RUnlock() return ua.x509Root } -func (ua *UpstreamAuthority) X509Roots() []*x509.Certificate { +func (ua *UpstreamAuthority) X509Roots() []*x509certificate.X509Authority { ua.x509CAMtx.RLock() defer ua.x509CAMtx.RUnlock() return ua.x509Roots @@ -263,8 +286,11 @@ func (ua *UpstreamAuthority) sendPublishJWTKeyStream(stream upstreamauthorityv1. 
} func (ua *UpstreamAuthority) createRootCertificate() { - template := createCATemplate("FAKEUPSTREAMAUTHORITY-ROOT", ua.nextX509CASN(), ua.config.KeyUsage) - ua.x509Root = createCertificate(ua.t, template, template, &x509RootKey.PublicKey, x509RootKey) + template := createCATemplate(ua.config.Clock.Now(), "FAKEUPSTREAMAUTHORITY-ROOT", ua.nextX509CASN(), ua.config.KeyUsage) + root := createCertificate(ua.t, template, template, &x509RootKey.PublicKey, x509RootKey) + ua.x509Root = &x509certificate.X509Authority{ + Certificate: root, + } ua.x509Roots = append(ua.x509Roots, ua.x509Root) } @@ -272,8 +298,8 @@ func (ua *UpstreamAuthority) createIntermediateCertificate() { if ua.x509Root == nil { ua.createRootCertificate() } - template := createCATemplate("FAKEUPSTREAMAUTHORITY-INT", ua.nextX509CASN(), ua.config.KeyUsage) - ua.x509Intermediate = createCertificate(ua.t, template, ua.x509Root, &x509IntKey.PublicKey, x509RootKey) + template := createCATemplate(ua.config.Clock.Now(), "FAKEUPSTREAMAUTHORITY-INT", ua.nextX509CASN(), ua.config.KeyUsage) + ua.x509Intermediate = createCertificate(ua.t, template, ua.x509Root.Certificate, &x509IntKey.PublicKey, x509RootKey) } func (ua *UpstreamAuthority) nextX509CASN() int64 { @@ -281,8 +307,7 @@ func (ua *UpstreamAuthority) nextX509CASN() int64 { return ua.x509CASN } -func createCATemplate(cn string, sn int64, keyUsage x509.KeyUsage) *x509.Certificate { - now := time.Now() +func createCATemplate(now time.Time, cn string, sn int64, keyUsage x509.KeyUsage) *x509.Certificate { return &x509.Certificate{ SerialNumber: big.NewInt(sn), Subject: pkix.Name{ diff --git a/test/integration/README.md b/test/integration/README.md index 1d15dbc46be..fbb24af066d 100644 --- a/test/integration/README.md +++ b/test/integration/README.md @@ -3,6 +3,8 @@ This directory contains the Integration Test framework for SPIRE. Integration tests are run nightly, for each PR, and doing a release. +The suites are under `suites/`. 
+ ## Executing Test Suites When the framework executes a test suite, it performs the following: @@ -26,8 +28,7 @@ scripts are **NOT** executed. ## Adding a Test Suite 1. Create a new folder under `suites/`. -1. Add a `README.md` to the test suite and link to it in this document under - [Test Suites](#test-suites). The README should contain high level details +1. Add a `README.md` to the test suite. The README should contain high level details about what is being tested by the test suite. 1. Add step scripts (i.e. files matching the `??-*` pattern) that perform the requisite steps. These scripts will be executed in lexographic order. @@ -68,24 +69,3 @@ The following environment variables are available to the teardown script: | `REPODIR` | Path to the root of the git repository. | | `ROOTDIR` | Path to the root of the integration test directory (i.e. `${REPODIR}/test/integration` ) | | `SUCCESS` | If set, indicates the test suite was successful. | - -## Test Suites - -* [Admin Endpoints](suites/admin-endpoints/README.md) -* [Upstream Authority cert-manager](suites/upstream-authority-cert-manager/README.md) -* [Upstream Authority vault](suites/upstream-authority-vault/README.md) -* [Datastore (MySQL)](suites/datastore-mysql/README.md) -* [Datastore (Postgres)](suites/datastore-postgres/README.md) -* [Debug Endpoints](suites/debug-endpoints/README.md) -* [Downstream Endpoint](suites/downstream-endpoints/README.md) -* [Envoy SDS (v3)](suites/envoy-sds-v3/README.md) -* [Ghostunnel + Federation](suites/ghostunnel-federation/README.md) -* [Join Token](suites/join-token/README.md) -* [Kubernetes](suites/k8s) -* [Nested Rotation](suites/nested-rotation/README.md) -* [Node Attestation](suites/node-attestation/README.md) -* [Rotation](suites/rotation/README.md) -* [Self Test](suites/self-test/README.md) -* [SPIRE Server CLI](suites/spire-server-cli/README.md) -* [Upgrade](suites/upgrade/README.md) -* [Windows Service](suites-windows/windows-service/README.md) diff --git 
a/test/integration/suites-windows/windows-service/04-create-registration-entries b/test/integration/suites-windows/windows-service/04-create-registration-entries index 9aac6412ab6..506fe604625 100644 --- a/test/integration/suites-windows/windows-service/04-create-registration-entries +++ b/test/integration/suites-windows/windows-service/04-create-registration-entries @@ -7,6 +7,6 @@ docker compose exec -T spire-server \ -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/workload" \ -selector "windows:user_name:User Manager\ContainerUser" \ - -ttl 0 + -x509SVIDTTL 0 assert-synced-entry "spiffe://domain.test/workload" diff --git a/test/integration/suites-windows/windows-workload-attestor/04-create-registration-entries b/test/integration/suites-windows/windows-workload-attestor/04-create-registration-entries index 88de142502e..6edd0903cd8 100644 --- a/test/integration/suites-windows/windows-workload-attestor/04-create-registration-entries +++ b/test/integration/suites-windows/windows-workload-attestor/04-create-registration-entries @@ -6,7 +6,7 @@ docker compose exec -T spire-server \ -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/workload" \ -selector "windows:user_name:User Manager\ContainerUser" \ - -ttl 0 + -x509SVIDTTL 0 check-synced-entry "spire-agent" "spiffe://domain.test/workload" diff --git a/test/integration/suites/admin-endpoints/05-create-registration-entries b/test/integration/suites/admin-endpoints/05-create-registration-entries index abf06df863d..589304e608d 100755 --- a/test/integration/suites/admin-endpoints/05-create-registration-entries +++ b/test/integration/suites/admin-endpoints/05-create-registration-entries @@ -7,7 +7,7 @@ docker compose exec -T spire-server-a \ -spiffeID "spiffe://domain-a.test/admin" \ -selector "unix:uid:1001" \ -admin \ - -ttl 0 + -x509SVIDTTL 0 check-synced-entry 
"spire-agent-a" "spiffe://domain-a.test/admin" log-debug "creating foreign admin registration entry..." @@ -17,7 +17,7 @@ docker compose exec -T spire-server-b \ -spiffeID "spiffe://domain-b.test/admin" \ -selector "unix:uid:1003" \ -federatesWith "spiffe://domain-a.test" \ - -ttl 0 + -x509SVIDTTL 0 check-synced-entry "spire-agent-b" "spiffe://domain-b.test/admin" log-debug "creating regular registration entry..." @@ -26,5 +26,5 @@ docker compose exec -T spire-server-a \ -parentID "spiffe://domain-a.test/spire/agent/x509pop/$(fingerprint conf/domain-a/agent/agent.crt.pem)" \ -spiffeID "spiffe://domain-a.test/workload" \ -selector "unix:uid:1002" \ - -ttl 0 + -x509SVIDTTL 0 check-synced-entry "spire-agent-a" "spiffe://domain-a.test/workload" diff --git a/test/integration/suites/agent-cli/07-check-api-watch b/test/integration/suites/agent-cli/07-check-api-watch index 47dc61f1fb2..c7846bbc932 100755 --- a/test/integration/suites/agent-cli/07-check-api-watch +++ b/test/integration/suites/agent-cli/07-check-api-watch @@ -8,7 +8,7 @@ docker compose exec -T spire-server \ -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/workload-$m" \ -selector "unix:uid:1001" \ - -ttl 20 & + -x509SVIDTTL 20 & # Get the PID of the last background process API_WATCH_PID=$! diff --git a/test/integration/suites/datastore-mysql-replication/01-test-variants b/test/integration/suites/datastore-mysql-replication/01-test-variants index fd4b4063e6f..a09d2f7195e 100755 --- a/test/integration/suites/datastore-mysql-replication/01-test-variants +++ b/test/integration/suites/datastore-mysql-replication/01-test-variants @@ -68,7 +68,7 @@ get_mysql_root_password() { echo "${root_password}" } -# Setup a primary server with group replication. It is compatible with MySQL 5.7 and above. +# Setup a primary server with group replication. 
configure-readwrite-group-replication() { service=$1 mysql_root_password=$2 @@ -86,7 +86,7 @@ SELECT * FROM performance_schema.replication_group_members; docker compose exec -T "${service}" mysql -uroot "-p$mysql_root_password" -e "${replication_script}" } -# Setup a replica server with group replication. It is compatible with MySQL 5.7 and above. +# Setup a replica server with group replication. configure-readonly-group-replication() { service=$1 mysql_root_password=$2 @@ -118,11 +118,4 @@ test-mysql-replication() { docker-stop "${readwrite_service_name}" "${readonly_service_name}" } -arch=$(uname -m) -if [ $arch = "amd64" ] || [ $arch = "x86_64" ]; then - test-mysql-replication mysql-5-7 || exit 1 -else - log-warn "skipping MySQL 5.7 test on $arch" -fi - test-mysql-replication mysql-8-0 || exit 1 diff --git a/test/integration/suites/datastore-mysql-replication/docker-compose.yaml b/test/integration/suites/datastore-mysql-replication/docker-compose.yaml index a4e1bd3429a..1ce8f09d494 100644 --- a/test/integration/suites/datastore-mysql-replication/docker-compose.yaml +++ b/test/integration/suites/datastore-mysql-replication/docker-compose.yaml @@ -1,65 +1,5 @@ version: '3.5' services: - # MySQL 5.7 containers - mysql-5-7-readwrite: - image: mysql/mysql-server:5.7 - environment: - - MYSQL_PASSWORD=test - - MYSQL_DATABASE=spire - - MYSQL_USER=spire - - MYSQL_RANDOM_ROOT_PASSWORD=yes - ports: - - "9999:3306" - container_name: mysql-5-7-readwrite - command: - - "--server-id=1" - - "--log-bin=mysql-bin-1.log" - - "--enforce-gtid-consistency=ON" - - "--log-slave-updates=ON" - - "--gtid-mode=ON" - - "--transaction-write-set-extraction=XXHASH64" - - "--binlog-checksum=NONE" - - "--master-info-repository=TABLE" - - "--relay-log-info-repository=TABLE" - - "--plugin-load=group_replication.so" - - "--relay-log-recovery=ON" - - "--group-replication-start-on-boot=OFF" - - "--group-replication-group-name=43991639-43EE-454C-82BD-F08A13F3C3ED" - - 
"--group-replication-local-address=mysql-5-7-readwrite:33061" - - "--group-replication-group-seeds=mysql-5-7-readwrite:33061,mysql-5-7-readonly:33062" - - "--group-replication-single-primary-mode=ON" - - "--group-replication-enforce-update-everywhere-checks=OFF" - - "--group-replication-auto-increment-increment=1" - mysql-5-7-readonly: - image: mysql/mysql-server:5.7 - environment: - - MYSQL_PASSWORD=test - - MYSQL_DATABASE=spire - - MYSQL_USER=spire - - MYSQL_RANDOM_ROOT_PASSWORD=yes - ports: - - "10000:3306" - container_name: mysql-5-7-readonly - command: - - "--server-id=2" - - "--log-bin=mysql-bin-1.log" - - "--enforce-gtid-consistency=ON" - - "--log-slave-updates=ON" - - "--gtid-mode=ON" - - "--transaction-write-set-extraction=XXHASH64" - - "--binlog-checksum=NONE" - - "--master-info-repository=TABLE" - - "--relay-log-info-repository=TABLE" - - "--plugin-load=group_replication.so" - - "--relay-log-recovery=ON" - - "--group-replication-start-on-boot=OFF" - - "--group-replication-group-name=43991639-43EE-454C-82BD-F08A13F3C3ED" - - "--group-replication-local-address=mysql-5-7-readonly:33062" - - "--group-replication-group-seeds=mysql-5-7-readwrite:33061,mysql-5-7-readonly:33062" - - "--group-replication-single-primary-mode=ON" - - "--group-replication-enforce-update-everywhere-checks=OFF" - - "--group-replication-auto-increment-increment=1" - # MySQL 8.0 containers mysql-8-0-readwrite: image: mysql/mysql-server:8.0 diff --git a/test/integration/suites/datastore-mysql/01-test-variants b/test/integration/suites/datastore-mysql/01-test-variants index 3d140956818..681c50451fd 100755 --- a/test/integration/suites/datastore-mysql/01-test-variants +++ b/test/integration/suites/datastore-mysql/01-test-variants @@ -51,11 +51,4 @@ test-mysql() { docker-stop "${SERVICE}" } -arch=$(uname -m) -if [ $arch = "amd64" ] || [ $arch = "x86_64" ]; then - test-mysql mysql-5-7 || exit 1 -else - log-warn "skipping MySQL 5.7 test on $arch" -fi - test-mysql mysql-8-0 || exit 1 diff 
--git a/test/integration/suites/datastore-mysql/README.md b/test/integration/suites/datastore-mysql/README.md index 17f8b997e15..8edb888d505 100644 --- a/test/integration/suites/datastore-mysql/README.md +++ b/test/integration/suites/datastore-mysql/README.md @@ -4,7 +4,6 @@ The suite runs the following MySQL versions against the SQL datastore unit tests: -- 5.7 - 8.0 A special unit test binary is built from sources that targets the docker diff --git a/test/integration/suites/datastore-mysql/docker-compose.yaml b/test/integration/suites/datastore-mysql/docker-compose.yaml index 98ff40192ae..847acc25ac6 100644 --- a/test/integration/suites/datastore-mysql/docker-compose.yaml +++ b/test/integration/suites/datastore-mysql/docker-compose.yaml @@ -1,15 +1,4 @@ services: - mysql-5-7: - image: mysql:5.7 - environment: - - MYSQL_PASSWORD=test - - MYSQL_DATABASE=spire - - MYSQL_USER=spire - - MYSQL_RANDOM_ROOT_PASSWORD=yes - tmpfs: - - /var/lib/mysql - ports: - - "9999:3306" mysql-8-0: image: mysql:8.0 environment: diff --git a/test/integration/suites/debug-endpoints/04-create-registration-entries b/test/integration/suites/debug-endpoints/04-create-registration-entries index 7eef854da04..33c41a9b150 100755 --- a/test/integration/suites/debug-endpoints/04-create-registration-entries +++ b/test/integration/suites/debug-endpoints/04-create-registration-entries @@ -7,7 +7,7 @@ docker compose exec -T spire-server \ -spiffeID "spiffe://domain.test/admin" \ -selector "unix:uid:1001" \ -admin \ - -ttl 0 + -x509SVIDTTL 0 check-synced-entry "spire-agent" "spiffe://domain.test/admin" log-debug "creating regular registration entry..." 
@@ -16,6 +16,6 @@ docker compose exec -T spire-server \ -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/workload" \ -selector "unix:uid:1002" \ - -ttl 0 + -x509SVIDTTL 0 check-synced-entry "spire-agent" "spiffe://domain.test/workload" diff --git a/test/integration/suites/delegatedidentity/04-create-registration-entries b/test/integration/suites/delegatedidentity/04-create-registration-entries index 000c073069c..0ba8854c5c0 100755 --- a/test/integration/suites/delegatedidentity/04-create-registration-entries +++ b/test/integration/suites/delegatedidentity/04-create-registration-entries @@ -6,7 +6,7 @@ docker compose exec -T spire-server \ -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/authorized_delegate" \ -selector "unix:uid:1001" \ - -ttl 0 + -x509SVIDTTL 0 check-synced-entry "spire-agent" "spiffe://domain.test/authorized_delegate" log-debug "creating registration entry for workload..." @@ -15,5 +15,5 @@ docker compose exec -T spire-server \ -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/workload" \ -selector "unix:uid:1002" \ - -ttl 0 + -x509SVIDTTL 0 check-synced-entry "spire-agent" "spiffe://domain.test/workload" diff --git a/test/integration/suites/downstream-endpoints/04-create-entries b/test/integration/suites/downstream-endpoints/04-create-entries index a8c4dbd9bd7..29b4d56d7e1 100755 --- a/test/integration/suites/downstream-endpoints/04-create-entries +++ b/test/integration/suites/downstream-endpoints/04-create-entries @@ -7,7 +7,7 @@ docker compose exec -T spire-server \ -spiffeID "spiffe://domain.test/downstream" \ -selector "unix:uid:1001" \ -downstream \ - -ttl 0 + -x509SVIDTTL 0 check-synced-entry "spire-agent" "spiffe://domain.test/downstream" log-debug "creating workload registration entry..." 
@@ -16,6 +16,6 @@ docker compose exec -T spire-server \ -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/workload" \ -selector "unix:uid:1002" \ - -ttl 0 + -x509SVIDTTL 0 check-synced-entry "spire-agent" "spiffe://domain.test/workload" diff --git a/test/integration/suites/envoy-sds-v3-spiffe-auth/00-test-envoy-releases.sh b/test/integration/suites/envoy-sds-v3-spiffe-auth/00-test-envoy-releases.sh index d2d25109615..3492f51b7c5 100755 --- a/test/integration/suites/envoy-sds-v3-spiffe-auth/00-test-envoy-releases.sh +++ b/test/integration/suites/envoy-sds-v3-spiffe-auth/00-test-envoy-releases.sh @@ -59,7 +59,7 @@ setup-tests() { -spiffeID "spiffe://federated-domain.test/downstream-proxy" \ -selector "unix:uid:0" \ -federatesWith "spiffe://domain.test" \ - -ttl 0 + -x509SVIDTTL 0 log-debug "creating registration entry for upstream proxy..." docker compose exec -T upstream-spire-server \ @@ -68,7 +68,7 @@ setup-tests() { -spiffeID "spiffe://domain.test/upstream-proxy" \ -selector "unix:uid:0" \ -federatesWith "spiffe://federated-domain.test" \ - -ttl 0 + -x509SVIDTTL 0 log-debug "creating registration entry for downstream proxy..." 
docker compose exec -T upstream-spire-server \ @@ -76,7 +76,7 @@ setup-tests() { -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/downstream/agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/downstream-proxy" \ -selector "unix:uid:0" \ - -ttl 0 + -x509SVIDTTL 0 } test-envoy() { diff --git a/test/integration/suites/envoy-sds-v3/00-test-envoy-releases b/test/integration/suites/envoy-sds-v3/00-test-envoy-releases index f2f2e29c880..9ab835901f3 100755 --- a/test/integration/suites/envoy-sds-v3/00-test-envoy-releases +++ b/test/integration/suites/envoy-sds-v3/00-test-envoy-releases @@ -20,7 +20,7 @@ setup-tests() { -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/upstream-agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/upstream-workload" \ -selector "unix:uid:0" \ - -ttl 0 + -x509SVIDTTL 0 log-debug "creating registration entry for downstream workload..." docker compose exec -T spire-server \ @@ -28,7 +28,7 @@ setup-tests() { -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/downstream-agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/downstream-workload" \ -selector "unix:uid:0" \ - -ttl 0 + -x509SVIDTTL 0 } test-envoy() { diff --git a/test/integration/suites/evict-agent/04-ban-agent b/test/integration/suites/evict-agent/04-ban-agent index 5bf22762690..1aba5342546 100755 --- a/test/integration/suites/evict-agent/04-ban-agent +++ b/test/integration/suites/evict-agent/04-ban-agent @@ -2,21 +2,21 @@ log-debug "banning agent..." 
-docker compose exec -T spire-server \ - /opt/spire/bin/spire-server agent ban \ - -spiffeID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/agent/agent.crt.pem)" - -# Check at most 30 times (with one second in between) that the agent has -# successfully banned +# Attempt at most 30 times (with one second in between) to ban the agent MAXCHECKS=30 CHECKINTERVAL=1 +spiffe_id="spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/agent/agent.crt.pem)" for ((i=1;i<=MAXCHECKS;i++)); do - log-info "checking for agent is shutting down ($i of $MAXCHECKS max)..." - docker compose logs spire-agent - if docker compose logs spire-agent | grep "Agent is banned: removing SVID and shutting down"; then + log-info "attempting to ban agent ${spiffe_id} ($i of $MAXCHECKS max)..." + + docker compose exec -T spire-server \ + /opt/spire/bin/spire-server agent ban \ + -spiffeID "${spiffe_id}" + docker compose logs spire-server + if docker compose logs spire-server | grep "Agent banned"; then exit 0 fi sleep "${CHECKINTERVAL}" done -fail-now "timed out waiting for agent to shutdown" +fail-now "timed out waiting for successful ban" diff --git a/test/integration/suites/evict-agent/05-agent-is-banned b/test/integration/suites/evict-agent/05-agent-is-banned new file mode 100755 index 00000000000..da9a22e828f --- /dev/null +++ b/test/integration/suites/evict-agent/05-agent-is-banned @@ -0,0 +1,16 @@ +#!/bin/bash + +# Check at most 30 times (with one second in between) that the agent has +# been successfully banned +MAXCHECKS=30 +CHECKINTERVAL=1 +for ((i=1;i<=MAXCHECKS;i++)); do + log-info "checking for agent is shutting down due to being banned ($i of $MAXCHECKS max)..." 
+ docker compose logs spire-agent + if docker compose logs spire-agent | grep "Agent is banned: removing SVID and shutting down"; then + exit 0 + fi + sleep "${CHECKINTERVAL}" +done + +fail-now "timed out waiting for agent to shutdown" diff --git a/test/integration/suites/evict-agent/05-agent-failed-to-start b/test/integration/suites/evict-agent/06-agent-failed-to-start similarity index 94% rename from test/integration/suites/evict-agent/05-agent-failed-to-start rename to test/integration/suites/evict-agent/06-agent-failed-to-start index 8116e357442..9a4132c7e2a 100755 --- a/test/integration/suites/evict-agent/05-agent-failed-to-start +++ b/test/integration/suites/evict-agent/06-agent-failed-to-start @@ -3,7 +3,7 @@ log-debug "starting agent again..." docker-up spire-agent -# Check at most 30 times (with one second in between) that the agent is not able to get new +# Check at most 30 times (with one second in between) that the agent is not able to get new # workload entries. MAXCHECKS=30 CHECKINTERVAL=1 diff --git a/test/integration/suites/evict-agent/06-delete-agent b/test/integration/suites/evict-agent/06-delete-agent deleted file mode 100755 index 29fbe58c4e3..00000000000 --- a/test/integration/suites/evict-agent/06-delete-agent +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -log-debug "deleting agent to enable reattestation..." - -docker compose exec -T spire-server \ - /opt/spire/bin/spire-server agent evict \ - -spiffeID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/agent/agent.crt.pem)" diff --git a/test/integration/suites/evict-agent/07-evict-agent b/test/integration/suites/evict-agent/07-evict-agent new file mode 100755 index 00000000000..8d63703f955 --- /dev/null +++ b/test/integration/suites/evict-agent/07-evict-agent @@ -0,0 +1,20 @@ +#!/bin/bash + +log-debug "evicting (deleting) agent to re-enable attestation..." 
+ +# Check at most 30 times (with one second in between) that we can evict the agent +MAXCHECKS=30 +CHECKINTERVAL=1 +spiffe_id="spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/agent/agent.crt.pem)" +for ((i=1;i<=MAXCHECKS;i++)); do + log-info "attempting to evict agent ${spiffe_id} ($i of $MAXCHECKS max)..." + + docker compose exec -T spire-server \ + /opt/spire/bin/spire-server agent evict \ + -spiffeID ${spiffe_id} + docker compose logs spire-server + if docker compose logs spire-server | grep "Agent deleted"; then + exit 0 + fi + sleep "${CHECKINTERVAL}" +done diff --git a/test/integration/suites/evict-agent/08-agent-reattest-attempt b/test/integration/suites/evict-agent/08-agent-reattest-attempt new file mode 100755 index 00000000000..1ecb0dce9d3 --- /dev/null +++ b/test/integration/suites/evict-agent/08-agent-reattest-attempt @@ -0,0 +1,20 @@ +#!/bin/bash + +log-debug "agent re-attesting..." + +# Check at most 30 times (with one second in between) that the agent knows it can re-attest. +# This is not true "re-attestation" since when the agent was banned it removed its own SVID. +MAXCHECKS=30 +CHECKINTERVAL=1 +for ((i=1;i<=MAXCHECKS;i++)); do + log-info "checking for agent to get notification and try to reattest ($i of $MAXCHECKS max)..." + log-debug "starting agent again..." + docker-up spire-agent + docker compose logs spire-agent + if docker compose logs spire-agent | grep "SVID is not found. Starting node attestation"; then + exit 0 + fi + sleep "${CHECKINTERVAL}" +done + +fail-now "timed out waiting for agent to try to re-attest" diff --git a/test/integration/suites/evict-agent/08-evict-agent b/test/integration/suites/evict-agent/08-evict-agent deleted file mode 100755 index d7b5de12900..00000000000 --- a/test/integration/suites/evict-agent/08-evict-agent +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -log-debug "deleting agent..." 
- -# Check at most 30 times (with one second in between) that we can evict the agent, it may take a while for it to start up -MAXCHECKS=30 -CHECKINTERVAL=1 -for ((i=1;i<=MAXCHECKS;i++)); do - log-info "attempting to evict agent ($i of $MAXCHECKS max)..." - if docker compose exec -T spire-server \ - /opt/spire/bin/spire-server agent evict \ - -spiffeID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/agent/agent.crt.pem)"; then - exit 0 - fi - sleep "${CHECKINTERVAL}" -done - - -# Check at most 30 times (with one second in between) that the agent has to re-attest -MAXCHECKS=30 -CHECKINTERVAL=1 -for ((i=1;i<=MAXCHECKS;i++)); do - log-info "checking for agent to get notification and try to reattest ($i of $MAXCHECKS max)..." - docker compose logs spire-agent - if docker compose logs spire-agent | grep "Agent needs to re-attest; will attempt to re-attest"; then - exit 0 - fi - sleep "${CHECKINTERVAL}" -done - -# Check at most 30 times (with one second in between) that the agent has re-attested -MAXCHECKS=30 -CHECKINTERVAL=1 -for ((i=1;i<=MAXCHECKS;i++)); do - log-info "checking for agent to get notification and try to reattest ($i of $MAXCHECKS max)..." - docker compose logs spire-agent - if docker compose logs spire-agent | grep "Successfully reattested node"; then - exit 0 - fi - sleep "${CHECKINTERVAL}" -done - -fail-now "timed out waiting for agent to shut down" - diff --git a/test/integration/suites/evict-agent/09-agent-reattested b/test/integration/suites/evict-agent/09-agent-reattested new file mode 100755 index 00000000000..ed086920f00 --- /dev/null +++ b/test/integration/suites/evict-agent/09-agent-reattested @@ -0,0 +1,15 @@ +#!/bin/bash + +# Check at most 30 times (with one second in between) that the agent has re-attested +MAXCHECKS=30 +CHECKINTERVAL=1 +for ((i=1;i<=MAXCHECKS;i++)); do + log-info "checking for agent to get notification that it re-attested ($i of $MAXCHECKS max)..." 
+ docker compose logs spire-agent + if docker compose logs spire-agent | grep "Node attestation was successful"; then + exit 0 + fi + sleep "${CHECKINTERVAL}" +done + +fail-now "timed out waiting for agent to re-attest" diff --git a/test/integration/suites/evict-agent/07-start-agent b/test/integration/suites/evict-agent/10-start-agent similarity index 83% rename from test/integration/suites/evict-agent/07-start-agent rename to test/integration/suites/evict-agent/10-start-agent index 1981d7b37ea..1597a12e149 100755 --- a/test/integration/suites/evict-agent/07-start-agent +++ b/test/integration/suites/evict-agent/10-start-agent @@ -1,7 +1,9 @@ #!/bin/bash log-debug "starting agent again..." - +log-debug "bringing agent down..." +docker-down spire-agent +log-debug "starting agent again..." docker-up spire-agent # Check at most 30 times (with one second in between) that the agent is back up diff --git a/test/integration/suites/fetch-x509-svids/04-create-registration-entries b/test/integration/suites/fetch-x509-svids/04-create-registration-entries index 6a3d23b3db0..318b53162db 100755 --- a/test/integration/suites/fetch-x509-svids/04-create-registration-entries +++ b/test/integration/suites/fetch-x509-svids/04-create-registration-entries @@ -10,7 +10,7 @@ for ((m=1;m<=$SIZE;m++)); do -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/workload-$m" \ -selector "unix:uid:1001" \ - -ttl 0 & + -x509SVIDTTL 0 & done for ((m=1;m<=$SIZE;m++)); do diff --git a/test/integration/suites/fetch-x509-svids/06-create-registration-entries b/test/integration/suites/fetch-x509-svids/06-create-registration-entries index 05ed54b1ac3..cb0f9333d60 100755 --- a/test/integration/suites/fetch-x509-svids/06-create-registration-entries +++ b/test/integration/suites/fetch-x509-svids/06-create-registration-entries @@ -10,7 +10,7 @@ for ((m=1;m<=$SIZE;m++)); do -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint 
conf/agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/workload/$m" \ -selector "unix:uid:1002" \ - -ttl 0 & + -x509SVIDTTL 0 & done for ((m=1;m<=$SIZE;m++)); do diff --git a/test/integration/suites/ghostunnel-federation/04-create-workload-entries b/test/integration/suites/ghostunnel-federation/04-create-workload-entries index 95f293e1005..00cc5b73428 100755 --- a/test/integration/suites/ghostunnel-federation/04-create-workload-entries +++ b/test/integration/suites/ghostunnel-federation/04-create-workload-entries @@ -9,7 +9,7 @@ docker compose exec -T downstream-spire-server \ -spiffeID "spiffe://downstream-domain.test/downstream-workload" \ -selector "unix:uid:0" \ -federatesWith "spiffe://upstream-domain.test" \ - -ttl 0 + -x509SVIDTTL 0 log-debug "creating registration entry for upstream workload..." docker compose exec -T upstream-spire-server \ @@ -18,4 +18,4 @@ docker compose exec -T upstream-spire-server \ -spiffeID "spiffe://upstream-domain.test/upstream-workload" \ -selector "unix:uid:0" \ -federatesWith "spiffe://downstream-domain.test" \ - -ttl 0 + -x509SVIDTTL 0 diff --git a/test/integration/suites/join-token/04-create-workload-entry b/test/integration/suites/join-token/04-create-workload-entry index c945899c4dc..a1d3b31555b 100755 --- a/test/integration/suites/join-token/04-create-workload-entry +++ b/test/integration/suites/join-token/04-create-workload-entry @@ -6,7 +6,9 @@ docker compose exec -T spire-server \ -parentID "spiffe://domain.test/node" \ -spiffeID "spiffe://domain.test/workload" \ -selector "unix:uid:0" \ - -ttl 0 + -x509SVIDTTL 0 \ + -jwtSVIDTTL 0 + # Check at most 30 times (with one second in between) that the agent has # successfully synced down the workload entry. 
diff --git a/test/integration/suites/k8s/conf/agent/spire-agent.yaml b/test/integration/suites/k8s/conf/agent/spire-agent.yaml index 4d1c9f21ad4..2cd61562d0a 100644 --- a/test/integration/suites/k8s/conf/agent/spire-agent.yaml +++ b/test/integration/suites/k8s/conf/agent/spire-agent.yaml @@ -69,10 +69,6 @@ data: # Minikube does not have a cert in the cluster CA bundle that # can authenticate the kubelet cert, so skip validation. skip_kubelet_verification = true - - # Use the new container locator method. This can be removed - # once it is on by default. - use_new_container_locator = true } } } diff --git a/test/integration/suites/nested-rotation/02-create-intermediate-downstream-entries b/test/integration/suites/nested-rotation/02-create-intermediate-downstream-entries index d5f5ed2bf68..3f4b496638b 100755 --- a/test/integration/suites/nested-rotation/02-create-intermediate-downstream-entries +++ b/test/integration/suites/nested-rotation/02-create-intermediate-downstream-entries @@ -7,7 +7,7 @@ docker compose exec -T root-server \ -spiffeID "spiffe://domain.test/intermediateA" \ -selector "docker:label:org.integration.name:intermediateA" \ -downstream \ - -ttl 3600 + -x509SVIDTTL 3600 check-synced-entry "root-agent" "spiffe://domain.test/intermediateA" log-debug "creating intermediateB downstream registration entry..." 
@@ -17,5 +17,5 @@ docker compose exec -T root-server \ -spiffeID "spiffe://domain.test/intermediateB" \ -selector "docker:label:org.integration.name:intermediateB" \ -downstream \ - -ttl 3600 + -x509SVIDTTL 3600 check-synced-entry "root-agent" "spiffe://domain.test/intermediateB" diff --git a/test/integration/suites/nested-rotation/04-create-leafA-downstream-entry b/test/integration/suites/nested-rotation/04-create-leafA-downstream-entry index 60b22ee3cb2..61d0b78b6f5 100755 --- a/test/integration/suites/nested-rotation/04-create-leafA-downstream-entry +++ b/test/integration/suites/nested-rotation/04-create-leafA-downstream-entry @@ -8,6 +8,6 @@ docker compose exec -T intermediateA-server \ -spiffeID "spiffe://domain.test/leafA" \ -selector "docker:label:org.integration.name:leafA" \ -downstream \ - -ttl 90 + -x509SVIDTTL 90 check-synced-entry "intermediateA-agent" "spiffe://domain.test/leafA" diff --git a/test/integration/suites/nested-rotation/07-create-leafB-downstream-entry b/test/integration/suites/nested-rotation/07-create-leafB-downstream-entry index ec419c107ff..2054bfec051 100755 --- a/test/integration/suites/nested-rotation/07-create-leafB-downstream-entry +++ b/test/integration/suites/nested-rotation/07-create-leafB-downstream-entry @@ -8,6 +8,6 @@ docker compose exec -T intermediateB-server \ -spiffeID "spiffe://domain.test/leafB" \ -selector "docker:label:org.integration.name:leafB" \ -downstream \ - -ttl 90 + -x509SVIDTTL 90 check-synced-entry "intermediateB-agent" "spiffe://domain.test/leafB" diff --git a/test/integration/suites/nested-rotation/09-create-workload-entries b/test/integration/suites/nested-rotation/09-create-workload-entries index c6061b977d7..c80851e22dc 100755 --- a/test/integration/suites/nested-rotation/09-create-workload-entries +++ b/test/integration/suites/nested-rotation/09-create-workload-entries @@ -6,7 +6,7 @@ docker compose exec -T intermediateA-server \ -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint 
intermediateA/agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/intermediateA/workload" \ -selector "unix:uid:1001" \ - -ttl 0 + -x509SVIDTTL 0 check-synced-entry "intermediateA-agent" "spiffe://domain.test/intermediateA/workload" log-debug "creating leafA workload registration entry..." @@ -15,7 +15,7 @@ docker compose exec -T leafA-server \ -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint leafA/agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/leafA/workload" \ -selector "unix:uid:1001" \ - -ttl 0 + -x509SVIDTTL 0 check-synced-entry "leafA-agent" "spiffe://domain.test/leafA/workload" log-debug "creating intermediateB workload registration entry..." @@ -24,7 +24,7 @@ docker compose exec -T intermediateB-server \ -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint intermediateB/agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/intermediateB/workload" \ -selector "unix:uid:1001" \ - -ttl 0 + -x509SVIDTTL 0 check-synced-entry "intermediateB-agent" "spiffe://domain.test/intermediateB/workload" log-debug "creating leafB workload registration entry..." @@ -33,5 +33,5 @@ docker compose exec -T leafB-server \ -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint leafB/agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/leafB/workload" \ -selector "unix:uid:1001" \ - -ttl 0 + -x509SVIDTTL 0 check-synced-entry "leafB-agent" "spiffe://domain.test/leafB/workload" diff --git a/test/integration/suites/nested-rotation/intermediateA/agent/agent.conf b/test/integration/suites/nested-rotation/intermediateA/agent/agent.conf index f5aa52f0085..fa172266299 100644 --- a/test/integration/suites/nested-rotation/intermediateA/agent/agent.conf +++ b/test/integration/suites/nested-rotation/intermediateA/agent/agent.conf @@ -26,9 +26,6 @@ plugins { } WorkloadAttestor "docker" { plugin_data { - # Use the new container locator method. This can be removed - # once it is on by default. 
- use_new_container_locator = true } } } diff --git a/test/integration/suites/nested-rotation/intermediateB/agent/agent.conf b/test/integration/suites/nested-rotation/intermediateB/agent/agent.conf index 9c98377e1d9..54bcef5580f 100644 --- a/test/integration/suites/nested-rotation/intermediateB/agent/agent.conf +++ b/test/integration/suites/nested-rotation/intermediateB/agent/agent.conf @@ -26,9 +26,6 @@ plugins { } WorkloadAttestor "docker" { plugin_data { - # Use the new container locator method. This can be removed - # once it is on by default. - use_new_container_locator = true } } } diff --git a/test/integration/suites/nested-rotation/root/agent/agent.conf b/test/integration/suites/nested-rotation/root/agent/agent.conf index d572f4b4464..eb32fd7b807 100644 --- a/test/integration/suites/nested-rotation/root/agent/agent.conf +++ b/test/integration/suites/nested-rotation/root/agent/agent.conf @@ -22,9 +22,6 @@ plugins { } WorkloadAttestor "docker" { plugin_data { - # Use the new container locator method. This can be removed - # once it is on by default. - use_new_container_locator = true } } } diff --git a/test/integration/suites/node-attestation/04-test-x509pop-attestation b/test/integration/suites/node-attestation/04-test-x509pop-attestation index 32f3230bfd7..79ad304327e 100755 --- a/test/integration/suites/node-attestation/04-test-x509pop-attestation +++ b/test/integration/suites/node-attestation/04-test-x509pop-attestation @@ -7,7 +7,7 @@ docker compose exec -T spire-server \ -spiffeID "spiffe://domain.test/admin" \ -selector "unix:uid:1000" \ -admin \ - -ttl 0 + -x509SVIDTTL 0 check-synced-entry "spire-agent" "spiffe://domain.test/admin" log-debug "running x509pop test..." 
diff --git a/test/integration/suites/oidc-discovery-provider/04-assert-jwks-using-workload-api b/test/integration/suites/oidc-discovery-provider/04-assert-jwks-using-workload-api index c0ec626ddfc..64953a7a80a 100755 --- a/test/integration/suites/oidc-discovery-provider/04-assert-jwks-using-workload-api +++ b/test/integration/suites/oidc-discovery-provider/04-assert-jwks-using-workload-api @@ -10,7 +10,8 @@ docker compose exec -T spire-server \ -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/oidc-provider" \ -selector "docker:label:org.integration.name:oidc-discovery-provider" \ - -ttl 0 + -x509SVIDTTL 0 \ + -jwtSVIDTTL 0 check-synced-entry "spire-agent" "spiffe://domain.test/oidc-provider" diff --git a/test/integration/suites/oidc-discovery-provider/conf/agent/agent.conf b/test/integration/suites/oidc-discovery-provider/conf/agent/agent.conf index 412d8c4996e..a7cfc3dc678 100644 --- a/test/integration/suites/oidc-discovery-provider/conf/agent/agent.conf +++ b/test/integration/suites/oidc-discovery-provider/conf/agent/agent.conf @@ -22,9 +22,6 @@ plugins { } WorkloadAttestor "docker" { plugin_data { - # Use the new container locator method. This can be removed - # once it is on by default. - use_new_container_locator = true } } } diff --git a/test/integration/suites/rotation/04-create-workload-entry b/test/integration/suites/rotation/04-create-workload-entry index 784ca9c291e..31e36c8c664 100755 --- a/test/integration/suites/rotation/04-create-workload-entry +++ b/test/integration/suites/rotation/04-create-workload-entry @@ -6,7 +6,7 @@ docker compose exec -T spire-server \ -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/workload" \ -selector "unix:uid:0" \ - -ttl 0 + -x509SVIDTTL 0 # Check at most 30 times (with one second in between) that the agent has # successfully synced down the workload entry. 
diff --git a/test/integration/suites/spire-server-cli/03-entry b/test/integration/suites/spire-server-cli/03-entry index 5e7288d250a..a7a490475c5 100755 --- a/test/integration/suites/spire-server-cli/03-entry +++ b/test/integration/suites/spire-server-cli/03-entry @@ -33,7 +33,7 @@ docker compose exec -T spire-server \ -spiffeID spiffe://domain.test/otherChild \ -node \ -dns dnsname1 \ - -ttl 123 || fail-now "failed to create entry 3" + -x509SVIDTTL 123 || fail-now "failed to create entry 3" # Verify entry count correctly indicates three entries docker compose exec -T spire-server /opt/spire/bin/spire-server entry count | grep 3 || fail-now "failed to count 3 entries" @@ -139,7 +139,7 @@ docker compose exec -T spire-server \ -parentID spiffe://domain.test/parent \ -spiffeID spiffe://domain.test/child1 \ -federatesWith spiffe://federated1.test \ - -ttl 456 || fail-now "failed to update entry 1" + -x509SVIDTTL 456 || fail-now "failed to update entry 1" docker compose exec -T spire-server \ /opt/spire/bin/spire-server entry update \ diff --git a/test/integration/suites/spire-server-cli/06-agent b/test/integration/suites/spire-server-cli/06-agent index 662f48c3c1f..8703d104f63 100755 --- a/test/integration/suites/spire-server-cli/06-agent +++ b/test/integration/suites/spire-server-cli/06-agent @@ -1,105 +1,13 @@ #!/bin/bash # Verify agent count correctly indicates three agents -docker compose exec -T spire-server /opt/spire/bin/spire-server agent count | grep 3 || fail-now "failed to count 3 agents" - -# Verify 3 agents were created -listResult="$(docker compose exec -T spire-server \ - /opt/spire/bin/spire-server agent list)" - -echo "$listResult" | grep "Found 3 attested agents" || fail-now "failed to list the 3 agents initially" -echo "$listResult" | grep "Attestation type" | grep "join_token" || fail-now "unexpected agents attestation type" - -# Get agent SPIFFE IDs from entries, knowing they were attested using join-token -# Agent 1 -agentID1="$(docker compose 
exec -T spire-server \ - /opt/spire/bin/spire-server entry show \ - -spiffeID spiffe://domain.test/node1)" -agentID1="$(echo "$agentID1" | grep "Parent ID")" || fail-now "failed to extract agentID1" -agentID1="${agentID1#*: }" - -# Agent 2 -agentID2="$(docker compose exec -T spire-server \ - /opt/spire/bin/spire-server entry show \ - -spiffeID spiffe://domain.test/node2)" -agentID2="$(echo "$agentID2" | grep "Parent ID")" || fail-now "failed to extract agentID2" -agentID2="${agentID2#*: }" - -# Agent 3 -agentID3="$(docker compose exec -T spire-server \ - /opt/spire/bin/spire-server entry show \ - -spiffeID spiffe://domain.test/node3)" -agentID3="$(echo "$agentID3" | grep "Parent ID")" || fail-now "failed to extract agentID3" -agentID3="${agentID3#*: }" - -# Verify agentIDs match -echo "$listResult" | grep "$agentID1" || fail-now "agentID1=$agentID1 not found in agentIDs list" -echo "$listResult" | grep "$agentID2" || fail-now "agentID2=$agentID2 not found in agentIDs list" -echo "$listResult" | grep "$agentID3" || fail-now "agentID3=$agentID3 not found in agentIDs list" - -# Verify agent show -# Agent 1 -showResult="$(docker compose exec -T spire-server \ - /opt/spire/bin/spire-server agent show -spiffeID $agentID1)" - -echo "$showResult" | grep "Found an attested agent given its SPIFFE ID" || fail-now "failed to show agent 1" -echo "$showResult" | grep "SPIFFE ID" | grep "$agentID1" || fail-now "unexpected SPIFFE ID for agent 1" -echo "$showResult" | grep "Attestation type" | grep "join_token" || fail-now "unexpected attestation type for agent 1" - -# Agent 2 -showResult="$(docker compose exec -T spire-server \ - /opt/spire/bin/spire-server agent show -spiffeID $agentID2)" - -echo "$showResult" | grep "Found an attested agent given its SPIFFE ID" || fail-now "failed to show agent 2" -echo "$showResult" | grep "SPIFFE ID" | grep "$agentID2" || fail-now "unexpected SPIFFE ID for agent 2" -echo "$showResult" | grep "Attestation type" | grep "join_token" || fail-now 
"unexpected attestation type for agent 2" - -# Agent 3 -showResult="$(docker compose exec -T spire-server \ - /opt/spire/bin/spire-server agent show -spiffeID $agentID3)" - -echo "$showResult" | grep "Found an attested agent given its SPIFFE ID" || fail-now "failed to show agent 3" -echo "$showResult" | grep "SPIFFE ID" | grep "$agentID3" || fail-now "unexpected SPIFFE ID for agent 3" -echo "$showResult" | grep "Attestation type" | grep "join_token" || fail-now "unexpected attestation type for agent 3" - -# Verify agent ban -docker compose exec -T spire-server \ - /opt/spire/bin/spire-server agent ban -spiffeID "$agentID1" | grep "Agent banned successfully" || fail-now "failed to ban agent 1" - -# Verify agent list after ban -docker compose exec -T spire-server \ - /opt/spire/bin/spire-server agent list | grep "Found 3 attested agents" || fail-now "failed to list the agents after ban" - -# Verify agent show after ban Agent 1 -docker compose exec -T spire-server \ - /opt/spire/bin/spire-server agent show -spiffeID $agentID1 | grep "Banned : true" || fail-now "agent 1 was not banned" - -# Verify agent evict -docker compose exec -T spire-server \ - /opt/spire/bin/spire-server agent evict -spiffeID "$agentID1" | grep "Agent evicted successfully" || fail-now "failed to evict agent 1" - -# Verify agent list after evict -docker compose exec -T spire-server \ - /opt/spire/bin/spire-server agent list | grep "Found 2 attested agents" || fail-now "failed to list the agents after evict" - -# Verify agent show after evict -# Agent 1 -echo "$(docker compose exec -T spire-server \ - /opt/spire/bin/spire-server agent show -spiffeID $agentID1 || echo "OK: agent 1 not found")" \ - | grep "OK: agent 1 not found" || fail-now "agent 1 was found after evict" - -# Agent 2 -showResult="$(docker compose exec -T spire-server \ - /opt/spire/bin/spire-server agent show -spiffeID $agentID2)" - -echo "$showResult" | grep "Found an attested agent given its SPIFFE ID" || fail-now "failed to show 
agent 2 after evict" -echo "$showResult" | grep "SPIFFE ID" | grep "$agentID2" || fail-now "unexpected SPIFFE ID for agent 2 after evict" -echo "$showResult" | grep "Attestation type" | grep "join_token" || fail-now "unexpected attestation type for agent 2 after evict" - -# Agent 3 -showResult="$(docker compose exec -T spire-server \ - /opt/spire/bin/spire-server agent show -spiffeID $agentID3)" - -echo "$showResult" | grep "Found an attested agent given its SPIFFE ID" || fail-now "failed to show agent 3 after evict" -echo "$showResult" | grep "SPIFFE ID" | grep "$agentID3" || fail-now "unexpected SPIFFE ID for agent 3 after evict" -echo "$showResult" | grep "Attestation type" | grep "join_token" || fail-now "unexpected attestation type for agent 3 after evict" +MAXCHECKS=30 +CHECKINTERVAL=1 +for ((i=1;i<=MAXCHECKS;i++)); do + log-info "checking that the server counts 3 agents ($i of $MAXCHECKS max)..." + if docker compose exec -T spire-server /opt/spire/bin/spire-server agent count | grep 3; then + exit 0 + fi + sleep "${CHECKINTERVAL}" +done +fail-now "failed to count 3 agents" diff --git a/test/integration/suites/spire-server-cli/07-agent-details b/test/integration/suites/spire-server-cli/07-agent-details new file mode 100755 index 00000000000..ee13b95fc0f --- /dev/null +++ b/test/integration/suites/spire-server-cli/07-agent-details @@ -0,0 +1,113 @@ +#!/bin/bash + +# Verify the 3 agents were created +log-info "listing agents..." +listResult="$(docker compose exec -T spire-server \ + /opt/spire/bin/spire-server agent list)" + +echo "$listResult" +echo "$listResult" | grep "Found 3 attested agents" || fail-now "failed to list the 3 agents initially" +echo "$listResult" | grep "Attestation type" | grep "join_token" || fail-now "unexpected agents attestation type" + +# Get agent SPIFFE IDs from entries, knowing they were attested using join-token +log-info "verifying details for each agent from list..." 
+# Agent 1 +agentID1="$(docker compose exec -T spire-server \ + /opt/spire/bin/spire-server entry show \ + -spiffeID spiffe://domain.test/node1)" +agentID1="$(echo "$agentID1" | grep "Parent ID")" || fail-now "failed to extract agentID1" +agentID1="${agentID1#*: }" + +# Agent 2 +agentID2="$(docker compose exec -T spire-server \ + /opt/spire/bin/spire-server entry show \ + -spiffeID spiffe://domain.test/node2)" +agentID2="$(echo "$agentID2" | grep "Parent ID")" || fail-now "failed to extract agentID2" +agentID2="${agentID2#*: }" + +# Agent 3 +agentID3="$(docker compose exec -T spire-server \ + /opt/spire/bin/spire-server entry show \ + -spiffeID spiffe://domain.test/node3)" +agentID3="$(echo "$agentID3" | grep "Parent ID")" || fail-now "failed to extract agentID3" +agentID3="${agentID3#*: }" + +# Verify agentIDs match +echo "$listResult" | grep "$agentID1" || fail-now "agentID1=$agentID1 not found in agentIDs list" +echo "$listResult" | grep "$agentID2" || fail-now "agentID2=$agentID2 not found in agentIDs list" +echo "$listResult" | grep "$agentID3" || fail-now "agentID3=$agentID3 not found in agentIDs list" + +# Verify agent show +log-info "verifying details for each agent from show..." 
+# Agent 1 +showResult="$(docker compose exec -T spire-server \ + /opt/spire/bin/spire-server agent show -spiffeID $agentID1)" + +echo "$showResult" +echo "$showResult" | grep "Found an attested agent given its SPIFFE ID" || fail-now "failed to show agent 1" +echo "$showResult" | grep "SPIFFE ID" | grep "$agentID1" || fail-now "unexpected SPIFFE ID for agent 1" +echo "$showResult" | grep "Attestation type" | grep "join_token" || fail-now "unexpected attestation type for agent 1" + +# Agent 2 +showResult="$(docker compose exec -T spire-server \ + /opt/spire/bin/spire-server agent show -spiffeID $agentID2)" + +echo "$showResult" +echo "$showResult" | grep "Found an attested agent given its SPIFFE ID" || fail-now "failed to show agent 2" +echo "$showResult" | grep "SPIFFE ID" | grep "$agentID2" || fail-now "unexpected SPIFFE ID for agent 2" +echo "$showResult" | grep "Attestation type" | grep "join_token" || fail-now "unexpected attestation type for agent 2" + +# Agent 3 +showResult="$(docker compose exec -T spire-server \ + /opt/spire/bin/spire-server agent show -spiffeID $agentID3)" + +echo "$showResult" +echo "$showResult" | grep "Found an attested agent given its SPIFFE ID" || fail-now "failed to show agent 3" +echo "$showResult" | grep "SPIFFE ID" | grep "$agentID3" || fail-now "unexpected SPIFFE ID for agent 3" +echo "$showResult" | grep "Attestation type" | grep "join_token" || fail-now "unexpected attestation type for agent 3" + +# Verify agent ban +log-info "banning and evicting agent 1..." 
+docker compose exec -T spire-server \ + /opt/spire/bin/spire-server agent ban -spiffeID "$agentID1" | grep "Agent banned successfully" || fail-now "failed to ban agent 1" + +# Verify agent list after ban +docker compose exec -T spire-server \ + /opt/spire/bin/spire-server agent list | grep "Found 3 attested agents" || fail-now "failed to list the agents after ban" + +# Verify agent show after ban Agent 1 +docker compose exec -T spire-server \ + /opt/spire/bin/spire-server agent show -spiffeID $agentID1 | grep "Banned : true" || fail-now "agent 1 was not banned" + +# Verify agent evict +docker compose exec -T spire-server \ + /opt/spire/bin/spire-server agent evict -spiffeID "$agentID1" | grep "Agent evicted successfully" || fail-now "failed to evict agent 1" + +# Verify agent list after evict +docker compose exec -T spire-server \ + /opt/spire/bin/spire-server agent list | grep "Found 2 attested agents" || fail-now "failed to list the agents after evict" + +# Verify agent show after evict +log-info "verifying new agent show..." 
+# Agent 1 +echo "$(docker compose exec -T spire-server \ + /opt/spire/bin/spire-server agent show -spiffeID $agentID1 || echo "OK: agent 1 not found")" \ + | grep "OK: agent 1 not found" || fail-now "agent 1 was found after evict" + +# Agent 2 +showResult="$(docker compose exec -T spire-server \ + /opt/spire/bin/spire-server agent show -spiffeID $agentID2)" + +echo "$showResult" +echo "$showResult" | grep "Found an attested agent given its SPIFFE ID" || fail-now "failed to show agent 2 after evict" +echo "$showResult" | grep "SPIFFE ID" | grep "$agentID2" || fail-now "unexpected SPIFFE ID for agent 2 after evict" +echo "$showResult" | grep "Attestation type" | grep "join_token" || fail-now "unexpected attestation type for agent 2 after evict" + +# Agent 3 +showResult="$(docker compose exec -T spire-server \ + /opt/spire/bin/spire-server agent show -spiffeID $agentID3)" + +echo "$showResult" +echo "$showResult" | grep "Found an attested agent given its SPIFFE ID" || fail-now "failed to show agent 3 after evict" +echo "$showResult" | grep "SPIFFE ID" | grep "$agentID3" || fail-now "unexpected SPIFFE ID for agent 3 after evict" +echo "$showResult" | grep "Attestation type" | grep "join_token" || fail-now "unexpected attestation type for agent 3 after evict" diff --git a/test/integration/suites/upgrade/01-run-upgrade-tests b/test/integration/suites/upgrade/01-run-upgrade-tests index 8909633391d..d51d3a16962 100755 --- a/test/integration/suites/upgrade/01-run-upgrade-tests +++ b/test/integration/suites/upgrade/01-run-upgrade-tests @@ -41,7 +41,7 @@ create-registration-entry() { -parentID "spiffe://domain.test/spire/agent/x509pop/$(fingerprint conf/agent/agent.crt.pem)" \ -spiffeID "spiffe://domain.test/workload" \ -selector "unix:uid:${UID}" \ - -ttl 0 + -x509SVIDTTL 0 # Check at most 30 times (with one second in between) that the agent has # successfully synced down the workload entry. 
diff --git a/test/integration/suites/upgrade/versions.txt b/test/integration/suites/upgrade/versions.txt index 2da4f09fbf1..9697b97d5bf 100644 --- a/test/integration/suites/upgrade/versions.txt +++ b/test/integration/suites/upgrade/versions.txt @@ -1,9 +1,5 @@ -1.9.0 -1.9.1 -1.9.2 -1.9.3 -1.9.4 -1.9.5 -1.9.6 1.10.0 1.10.1 +1.10.2 +1.10.3 +1.10.4 diff --git a/test/integration/suites/upstream-authority-ejbca/00-setup-kind b/test/integration/suites/upstream-authority-ejbca/00-setup-kind new file mode 100755 index 00000000000..390ff7c8e15 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/00-setup-kind @@ -0,0 +1,28 @@ +#!/bin/bash + +# Create a temporary path that will be added to the PATH to avoid picking up +# binaries from the environment that aren't a version match. +mkdir -p ./bin + +KIND_PATH=./bin/kind +KUBECTL_PATH=./bin/kubectl +HELM_PATH=./bin/helm + +# Download kind at the expected version at the given path. +download-kind "${KIND_PATH}" + +# Download kubectl at the expected version. +download-kubectl "${KUBECTL_PATH}" + +# Download helm at the expected version. +download-helm "${HELM_PATH}" + +# Start the kind cluster. +start-kind-cluster "${KIND_PATH}" ejbca-test + +# Load the given images in the cluster. +container_images=("spire-server:latest-local") +load-images "${KIND_PATH}" ejbca-test "${container_images[@]}" + +# Set the kubectl context. +set-kubectl-context "${KUBECTL_PATH}" kind-ejbca-test diff --git a/test/integration/suites/upstream-authority-ejbca/01-setup-ejbca b/test/integration/suites/upstream-authority-ejbca/01-setup-ejbca new file mode 100755 index 00000000000..d595ba7205d --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/01-setup-ejbca @@ -0,0 +1,14 @@ +#!/bin/bash + +set -e -o pipefail +source init-kubectl + +log-info "installing ejbca..." 
+ +EJBCA_NAMESPACE="ejbca" +EJBCA_MTLS_SECRET_NAME="superadmin-tls" +EJBCA_SUBCA_SECRET_NAME="subca" + +cd conf +./deploy.sh --ejbca-namespace "$EJBCA_NAMESPACE" --superadmin-secret-name "$EJBCA_MTLS_SECRET_NAME" --subca-secret-name "$EJBCA_SUBCA_SECRET_NAME" +cd .. diff --git a/test/integration/suites/upstream-authority-ejbca/02-deploy-spire b/test/integration/suites/upstream-authority-ejbca/02-deploy-spire new file mode 100755 index 00000000000..8bb4fc3e8c9 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/02-deploy-spire @@ -0,0 +1,24 @@ +#!/bin/bash + +set -e -o pipefail +source init-kubectl + +EJBCA_NAMESPACE="ejbca" +EJBCA_MTLS_SECRET_NAME="superadmin-tls" +EJBCA_SUBCA_SECRET_NAME="subca" + +log-info "installing spire..." +./bin/kubectl create namespace spire + +secrets=( + "$EJBCA_MTLS_SECRET_NAME" + "$EJBCA_SUBCA_SECRET_NAME" +) +for secret in "${secrets[@]}"; do + ./bin/kubectl --namespace "$EJBCA_NAMESPACE" get secret "$secret" -o yaml \ + | sed 's/namespace: .*/namespace: spire/' \ + | kubectl apply -f - +done + +kubectl -n spire apply -k conf/server +./bin/kubectl wait pods -n spire -l app=spire-server --for condition=Ready --timeout=60s diff --git a/test/integration/suites/upstream-authority-ejbca/03-verify-ca b/test/integration/suites/upstream-authority-ejbca/03-verify-ca new file mode 100755 index 00000000000..1487fbabd41 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/03-verify-ca @@ -0,0 +1,118 @@ +#!/bin/bash + +set -e -o pipefail +source init-kubectl + +EJBCA_NAMESPACE="ejbca" +EJBCA_SUBCA_SECRET_NAME="subca" + +log-debug "verifying CA..." + +cert_start="-----BEGIN CERTIFICATE-----" +cert_end="-----END CERTIFICATE-----" + +# First, collect the CA chain from the K8s secret created by the EJCBA +# deployment script. We expect this secret to have the full chain up to the root. 
+ +i=0 +while read -r line; do + if [[ "$line" == "$cert_start" ]]; then + cert="$line"$'\n' + in_cert=1 + elif [[ "$line" == "$cert_end" ]]; then + cert+="$line"$'\n' + chain[i]=$(echo "$cert") + i=$((i + 1)) + in_cert=0 + elif [[ $in_cert -eq 1 ]]; then + cert+="$line"$'\n' + fi +done < <(kubectl --namespace "$EJBCA_NAMESPACE" get secret "$EJBCA_SUBCA_SECRET_NAME" -o jsonpath='{.data.ca\.crt}' | base64 -d) + +log-debug "the issuing ca in EJBCA has a chain length of ${#chain[@]} certificates (including the root)" + +# Second, mint an x509 SVID from the SPIRE server and collect them into an array. +# +# The contents of mintx509svid_out should have the following format: +# +# X509-SVID: +# +# +# +# +# Private key: +# +# +# Root CAs: +# + +# So, the contents of `certs` should be the entire certificate chain, starting +# with the x509 svid at index 0, up to the root CA at index i. + +i=0 +while read -r line; do + if [[ "$line" == "$cert_start" ]]; then + cert="$line"$'\n' + in_cert=1 + elif [[ "$line" == "$cert_end" ]]; then + cert+="$line"$'\n' + certs[i]=$(echo "$cert") + i=$((i + 1)) + in_cert=0 + elif [[ $in_cert -eq 1 ]]; then + cert+="$line"$'\n' + fi +done < <(./bin/kubectl exec -n spire $(./bin/kubectl get pod -n spire -o name) -- /opt/spire/bin/spire-server x509 mint -spiffeID spiffe://example.org/ns/foo/sa/bar) + +log-debug "the x509 svid has a chain length of ${#certs[@]} certificates (including the svid and root)" + +# Verify that the SPIRE server is using the EJBCA UpstreamAuthority by comparing the CA chain + +log-debug "verifying that the intermediate ca(s) and root ca from the svid are the EJBCA issuing ca/intermediates and root ca" + +i=0 +while [[ $i -lt ${#chain[@]} ]]; do + expected_hash=$(echo "${chain[$i]}" | openssl x509 -noout -modulus | openssl sha256 | awk '{print $2}') + + corresponding_certs_index=$((${#certs[@]} - ${#chain[@]} + i)) + actual_hash=$(echo "${certs[$corresponding_certs_index]}" | openssl x509 -noout -modulus | openssl sha256 
| awk '{print $2}') + if [[ "$expected_hash" != "$actual_hash" ]]; then + fail-now "ca chain verification failed: expected modulus to have hash $expected_hash, got $actual_hash (cert $((i+1))/${#chain[@]})" + fi + i=$((i + 1)) +done + +log-debug "verifying that the x509 svid was signed by the spire intermediate ca, and that the spire intermediate ca has a valid chain up to the root ca in EJBCA" + +# We use -untrusted since none of the intermediates are trusted roots - IE, verify the whole chain +# Also, we verify against the CA chain from EJBCA to make extra sure that the SVID was signed by the correct CA +# We trust SPIRE to build a valid certificate chain, but we want to make sure that the SVID is part of the correct PKI. + +root_ca=("${chain[@]:((${#chain[@]} - 1)):1}") +full_chain=("${certs[1]}" "${chain[@]:0:${#chain[@]}-1}") + +# SPIRE requested the second certificate in certs +if ! openssl verify -CAfile <(printf "%s\n" "${root_ca[@]}") \ + -untrusted <(printf "%s\n" "${full_chain[@]}") \ + <(echo "${certs[0]}"); +then + fail-now "x509 svid verification failed: failed to verify the x509 svid up to the root ca in EJBCA" +fi + +log-debug "verifying that the x509 svid has the expected uri san" + +# Make sure that the x509 SVID has the correct URI +expectedURI="URI:spiffe://example.org/ns/foo/sa/bar" +actualURI=$(openssl x509 -noout -text -in <(echo "${certs[0]}") | grep URI | sed 's/^ *//g') +if [[ "$expectedURI" != "$actualURI" ]]; then + fail-now "x509 svid verification failed: expected URI to be $expectedURI, got $actualURI" +fi + +log-debug "verifying that the intermediate ca issued by EJBCA has the expected uri san" + +# Make sure that the intermediate CA has the correct URI +expectedURI="URI:spiffe://example.org" +actualURI=$(openssl x509 -noout -text -in <(echo "${certs[1]}") | grep URI | sed 's/^ *//g') +if [[ "$expectedURI" != "$actualURI" ]]; then + fail-now "x509 svid verification failed: expected URI to be $expectedURI, got $actualURI" +fi diff 
--git a/test/integration/suites/upstream-authority-ejbca/README.md b/test/integration/suites/upstream-authority-ejbca/README.md new file mode 100644 index 00000000000..90654bd5059 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/README.md @@ -0,0 +1,8 @@ +# Upstream Authority ejbca Suite + +## Description + +This suite sets up a single node Kubernetes cluster using [Kind](https://kind.sigs.k8s.io), deploys and configures EJBCA Community, and then asserts the following: + +1. SPIRE Server successfully requests an intermediate CA from EJBCA. +2. Verifies that workload x509s have been signed by that intermediate CA, and that EJBCA is the root of trust. diff --git a/test/integration/suites/upstream-authority-ejbca/conf/deploy.sh b/test/integration/suites/upstream-authority-ejbca/conf/deploy.sh new file mode 100755 index 00000000000..113f21eb267 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/conf/deploy.sh @@ -0,0 +1,401 @@ +#!/bin/bash + +EJBCA_NAMESPACE=ejbca +EJBCA_IMAGE="keyfactor/ejbca-ce" +EJBCA_TAG="latest" + +IMAGE_PULL_SECRET_NAME="" + +EJBCA_SUPERADMIN_SECRET_NAME="superadmin-tls" +EJBCA_MANAGEMENTCA_SECRET_NAME="managementca" +EJBCA_SUBCA_SECRET_NAME="subca" + +EJBCA_ROOT_CA_NAME="Root-CA" +EJBCA_SUB_CA_NAME="Sub-CA" + +# Verify that required tools are installed +verifySupported() { + HAS_HELM="$(type "helm" &>/dev/null && echo true || echo false)" + HAS_KUBECTL="$(type "kubectl" &>/dev/null && echo true || echo false)" + HAS_JQ="$(type "jq" &>/dev/null && echo true || echo false)" + HAS_CURL="$(type "curl" &>/dev/null && echo true || echo false)" + HAS_OPENSSL="$(type "openssl" &>/dev/null && echo true || echo false)" + + if [ "${HAS_JQ}" != "true" ]; then + echo "jq is required" + exit 1 + fi + + if [ "${HAS_CURL}" != "true" ]; then + echo "curl is required" + exit 1 + fi + + if [ "${HAS_HELM}" != "true" ]; then + echo "helm is required" + exit 1 + fi + + if [ "${HAS_KUBECTL}" != "true" ]; then + echo "kubectl is 
required" + exit 1 + fi + + if [ "${HAS_OPENSSL}" != "true" ]; then + echo "openssl is required" + exit 1 + fi +} + +############################################### +# EJBCA CA Creation and Initialization # +############################################### + +createConfigmapFromFile() { + local cluster_namespace=$1 + local configmap_name=$2 + local filepath=$3 + + if [ $(kubectl get configmap -n "$cluster_namespace" -o json | jq -c ".items | any(.[] | .metadata; .name == \"$configmap_name\")") == "false" ]; then + echo "Creating "$configmap_name" configmap" + kubectl create configmap -n "$cluster_namespace" "$configmap_name" --from-file="$filepath" + else + echo "$configmap_name exists" + fi +} + +# Figure out if the cluster is already initialized for EJBCA +isEjbcaAlreadyDeployed() { + deployed=false + if [ ! "$(kubectl --namespace "$EJBCA_NAMESPACE" get pods -l app.kubernetes.io/name=ejbca -o json | jq '.items[] | select(.metadata.labels."app.kubernetes.io/name" == "ejbca") | .metadata.name' | tr -d '"')" != "" ]; then + echo "EJBCA is not deployed - EJBCA pod is not present" + return 1 + fi + + if [[ ! $(kubectl get secret --namespace "$EJBCA_NAMESPACE" -o json | jq --arg "name" "$EJBCA_SUPERADMIN_SECRET_NAME" -e '.items[] | select(.metadata.name == $name)') ]]; then + echo "EJBCA is not deployed - SuperAdmin secret is not present" + return 1 + fi + + if [[ ! $(kubectl get secret --namespace "$EJBCA_NAMESPACE" -o json | jq --arg "name" "$EJBCA_MANAGEMENTCA_SECRET_NAME" -e '.items[] | select(.metadata.name == $name)') ]]; then + echo "EJBCA is not deployed - ManagementCA secret is not present" + return 1 + fi + + if [[ !
$(kubectl get secret --namespace "$EJBCA_NAMESPACE" -o json | jq --arg "name" "$EJBCA_SUBCA_SECRET_NAME" -e '.items[] | select(.metadata.name == $name)') ]]; then + echo "EJBCA is not deployed - SubCA secret is not present" + return 1 + fi + + return 0 +} + +certificate_exists() { + if [[ $(kubectl get certificate -o json | jq -r '.items.[] | select(.metadata.name == "ejbca-certificate")') == "" ]]; then + return 1 + else + return 0 + fi +} + +# Waits for the EJBCA node to be ready +# cluster_namespace - The namespace where the EJBCA node is running +# ejbca_pod_name - The name of the Pod running the EJBCA node +waitForEJBCANode() { + local cluster_namespace=$1 + local ejbca_pod_name=$2 + + echo "Waiting for EJBCA node to be ready" + until ! kubectl -n "$cluster_namespace" exec "$ejbca_pod_name" -- /opt/keyfactor/bin/ejbca.sh 2>&1 | grep -q "could not contact EJBCA"; do + echo "EJBCA node not ready yet, retrying in 5 seconds..." + sleep 5 + done + echo "EJBCA node $cluster_namespace/$ejbca_pod_name is ready."
+} + +configmapNameFromFilename() { + local filename=$1 + echo "$(basename "$filename" | tr _ - | tr '[:upper:]' '[:lower:]')" +} + +# Initialize the cluster for EJBCA +initClusterForEJBCA() { + # Create the EJBCA namespace if it doesn't already exist + if [ "$(kubectl get namespace -o json | jq -e '.items[] | select(.metadata.name == "'"$EJBCA_NAMESPACE"'") | .metadata.name')" == "" ]; then + kubectl create namespace "$EJBCA_NAMESPACE" + fi + + # Mount the staged EEPs & CPs to Kubernetes with ConfigMaps + for file in $(find ./ejbca/staging -maxdepth 1 -mindepth 1); do + configmapname="$(basename "$file")" + createConfigmapFromFile "$EJBCA_NAMESPACE" "$(configmapNameFromFilename "$configmapname")" "$file" + done + + # Mount the ejbca init script to Kubernetes using a ConfigMap + createConfigmapFromFile "$EJBCA_NAMESPACE" "ejbca-init" "./ejbca/scripts/ejbca-init.sh" +} + +# Clean up the config maps used to init the EJBCA database +cleanupEJBCAConfigMaps() { + for file in $(find ./ejbca/staging -maxdepth 1 -mindepth 1); do + configMapName="$(configmapNameFromFilename "$file")" + kubectl delete configmap --namespace "$EJBCA_NAMESPACE" "$configMapName" + done +} + +# Initialize the database by spinning up an instance of EJBCA in front of a MariaDB database, and +# create the CA hierarchy and import boilerplate profiles.
+initEJBCADatabase() { + helm_install_args=( + "--namespace" + "$EJBCA_NAMESPACE" + "install" + "ejbca-test" + "./ejbca" + "--set" "ejbca.ingress.enabled=false" + ) + + container_staging_dir="/opt/keyfactor/stage" + index=0 + for file in $(find ./ejbca/staging -maxdepth 1 -mindepth 1); do + configMapName="$(configmapNameFromFilename "$file")" + volume_name="$(echo "$configMapName" | sed 's/\.[^.]*$//')" + + helm_install_args+=("--set" "ejbca.volumes[$index].name=$volume_name") + helm_install_args+=("--set" "ejbca.volumes[$index].configMapName=$configMapName") + helm_install_args+=("--set" "ejbca.volumes[$index].mountPath=$container_staging_dir/$configMapName") + index=$((index + 1)) + done + + helm_install_args+=("--set" "ejbca.volumes[$index].name=ejbca-init") + helm_install_args+=("--set" "ejbca.volumes[$index].configMapName=ejbca-init") + helm_install_args+=("--set" "ejbca.volumes[$index].mountPath=/tmp/") + + helm_install_args+=("--set" "ejbca.extraEnvironmentVars[0].name=EJBCA_SUPERADMIN_COMMONNAME") + helm_install_args+=("--set" "ejbca.extraEnvironmentVars[0].value=SuperAdmin") + + helm_install_args+=("--set" "ejbca.extraEnvironmentVars[1].name=EJBCA_SUPERADMIN_SECRET_NAME") + helm_install_args+=("--set" "ejbca.extraEnvironmentVars[1].value=$EJBCA_SUPERADMIN_SECRET_NAME") + + helm_install_args+=("--set" "ejbca.extraEnvironmentVars[2].name=EJBCA_MANAGEMENTCA_SECRET_NAME") + helm_install_args+=("--set" "ejbca.extraEnvironmentVars[2].value=$EJBCA_MANAGEMENTCA_SECRET_NAME") + + helm_install_args+=("--set" "ejbca.extraEnvironmentVars[3].name=EJBCA_SUBCA_SECRET_NAME") + helm_install_args+=("--set" "ejbca.extraEnvironmentVars[3].value=$EJBCA_SUBCA_SECRET_NAME") + + helm_install_args+=("--set" "ejbca.extraEnvironmentVars[4].name=EJBCA_ROOT_CA_NAME") + helm_install_args+=("--set" "ejbca.extraEnvironmentVars[4].value=$EJBCA_ROOT_CA_NAME") + + helm_install_args+=("--set" "ejbca.extraEnvironmentVars[5].name=EJBCA_SUB_CA_NAME") + helm_install_args+=("--set"
"ejbca.extraEnvironmentVars[5].value=$EJBCA_SUB_CA_NAME") + + k8s_reverseproxy_service_fqdn="ejbca-rp-service.$EJBCA_NAMESPACE.svc.cluster.local" + helm_install_args+=("--set" "ejbca.extraEnvironmentVars[6].name=EJBCA_CLUSTER_REVERSEPROXY_FQDN") + helm_install_args+=("--set" "ejbca.extraEnvironmentVars[6].value=$k8s_reverseproxy_service_fqdn") + + helm_install_args+=("--set" "ejbca.extraEnvironmentVars[7].name=EJBCA_RP_TLS_SECRET_NAME") + helm_install_args+=("--set" "ejbca.extraEnvironmentVars[7].value=ejbca-reverseproxy-tls") + + helm_install_args+=("--set" "ejbca.image.repository=$EJBCA_IMAGE") + helm_install_args+=("--set" "ejbca.image.tag=$EJBCA_TAG") + if [ ! -z "$IMAGE_PULL_SECRET_NAME" ]; then + helm_install_args+=("--set" "ejbca.image.pullSecrets[0].name=$IMAGE_PULL_SECRET_NAME") + fi + + if ! helm "${helm_install_args[@]}" ; then + echo "Failed to install EJBCA" + kubectl delete namespace "$EJBCA_NAMESPACE" + exit 1 + fi + + # Wait for the EJBCA Pod to be ready + echo "Waiting for EJBCA Pod to be ready" + kubectl --namespace "$EJBCA_NAMESPACE" wait --for=condition=Available deployment -l app.kubernetes.io/name=ejbca --timeout=300s + kubectl --namespace "$EJBCA_NAMESPACE" wait --for=condition=Ready pod -l app.kubernetes.io/name=ejbca --timeout=300s + + # Get the name of the EJBCA Pod + local ejbca_pod_name + ejbca_pod_name=$(kubectl --namespace "$EJBCA_NAMESPACE" get pods -l app.kubernetes.io/name=ejbca -o json | jq '.items[] | select(.metadata.labels."app.kubernetes.io/name" == "ejbca") | .metadata.name' | tr -d '"') + + if [ "$ejbca_pod_name" == "" ]; then + echo "Failed to get the name of the EJBCA Pod" + kubectl delete ns "$EJBCA_NAMESPACE" + exit 1 + fi + + # Wait for the EJBCA Pod to be ready + waitForEJBCANode "$EJBCA_NAMESPACE" "$ejbca_pod_name" + + # Execute the EJBCA init script + args=( + --namespace "$EJBCA_NAMESPACE" exec "$ejbca_pod_name" -- + bash -c 'cp /tmp/ejbca-init.sh /opt/keyfactor/bin/ejbca-init.sh && chmod +x
/opt/keyfactor/bin/ejbca-init.sh && /opt/keyfactor/bin/ejbca-init.sh' + ) + if ! kubectl "${args[@]}" ; then + echo "Failed to execute the EJBCA init script" + kubectl delete ns "$EJBCA_NAMESPACE" + exit 1 + fi + + # Uninstall the EJBCA helm chart - database is peristent + helm --namespace "$EJBCA_NAMESPACE" uninstall ejbca-test + cleanupEJBCAConfigMaps +} + +# Deploy EJBCA with ingress enabled +deployEJBCA() { + # Package and deploy the EJBCA helm chart with ingress enabled + helm_install_args=( + "--namespace" + "$EJBCA_NAMESPACE" + "install" + "ejbca-test" + "./ejbca" + "--set" + "ejbca.ingress.enabled=false" + ) + helm_install_args+=("--set" "ejbca.reverseProxy.enabled=true") + + helm_install_args+=("--set" "ejbca.image.repository=$EJBCA_IMAGE") + helm_install_args+=("--set" "ejbca.image.tag=$EJBCA_TAG") + if [ ! -z "$IMAGE_PULL_SECRET_NAME" ]; then + helm_install_args+=("--set" "ejbca.image.pullSecrets[0].name=$IMAGE_PULL_SECRET_NAME") + fi + + if ! helm "${helm_install_args[@]}" ; then + echo "Failed to install EJBCA" + exit 1 + fi + + sleep 20 + + # Wait for the EJBCA Pod to be ready + echo "Waiting for EJBCA Pod to be ready" + kubectl --namespace "$EJBCA_NAMESPACE" wait --for=condition=ready pod -l app.kubernetes.io/instance=ejbca-test --timeout=300s + + # Get the name of the EJBCA Pod + local ejbca_pod_name + ejbca_pod_name=$(kubectl --namespace "$EJBCA_NAMESPACE" get pods -l app.kubernetes.io/name=ejbca -o json | jq '.items[] | select(.metadata.labels."app.kubernetes.io/name" == "ejbca") | .metadata.name' | tr -d '"') + + # Wait for the EJBCA node to be ready + waitForEJBCANode "$EJBCA_NAMESPACE" "$ejbca_pod_name" + + sleep 5 +} + +uninstallEJBCA() { + if ! 
isEjbcaAlreadyDeployed; then + echo "EJBCA is not deployed" + return 1 + fi + + helm --namespace "$EJBCA_NAMESPACE" uninstall ejbca-test + + kubectl delete namespace "$EJBCA_NAMESPACE" +} + +############################################### +# Helper Functions # +############################################### + +mariadbPvcExists() { + local namespace=$1 + + if [ "$(kubectl --namespace "$namespace" get pvc -l app.kubernetes.io/name=mariadb -o json | jq '.items[] | select(.metadata.labels."app.kubernetes.io/name" == "mariadb") | .metadata.name' | tr -d '"')" != "" ]; then + return 0 + else + return 1 + fi +} + +usage() { + echo "Usage: $0 [options...]" + echo "Options:" + echo " --ejbca-image Set the image to use for the EJBCA node. Defaults to keyfactor/ejbca-ce" + echo " --ejbca-tag Set the tag to use for the EJBCA node. Defaults to latest" + echo " --image-pull-secret Use a particular image pull secret in the ejbca namespace for the EJBCA node. Defaults to none" + echo " --ejbca-namespace Set the namespace to deploy the EJBCA node in. 
Defaults to ejbca" + echo " --superadmin-secret-name The name of the secret that will be created containing the SuperAdmin (client certificate)" + echo " --managementca-secret-name The name of the secret that will be created containing the ManagementCA certificate" + echo " --subca-secret-name The name of the secret that will be created containing the SubCA certificate and chain" + echo " --uninstall Uninstall EJBCA and SignServer" + echo " -h, --help Show this help message" + exit 1 +} + +# Verify that required tools are installed +verifySupported + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + --ejbca-namespace) + EJBCA_NAMESPACE="$2" + shift # past argument + shift # past value + ;; + --ejbca-image) + EJBCA_IMAGE="$2" + shift # past argument + shift # past value + ;; + --superadmin-secret-name) + EJBCA_SUPERADMIN_SECRET_NAME="$2" + shift # past argument + shift # past value + ;; + --managementca-secret-name) + EJBCA_MANAGEMENTCA_SECRET_NAME="$2" + shift # past argument + shift # past value + ;; + --subca-secret-name) + EJBCA_SUBCA_SECRET_NAME="$2" + shift # past argument + shift # past value + ;; + --ejbca-tag) + EJBCA_TAG="$2" + shift # past argument + shift # past value + ;; + --image-pull-secret) + IMAGE_PULL_SECRET_NAME="$2" + shift # past argument + shift # past value + ;; + --uninstall) + uninstallEJBCA + exit 0 + ;; + -h|--help) + usage + exit 0 + ;; + *) # unknown option + echo "Unknown option: $1" + usage + exit 1 + ;; + esac +done + +# Figure out if the cluster is already initialized for EJBCA +if ! 
isEjbcaAlreadyDeployed; then + if mariadbPvcExists "$EJBCA_NAMESPACE"; then + echo "The EJBCA database has already been configured - skipping database initialization" + + # Deploy EJBCA with ingress enabled + deployEJBCA + else + # Prepare the cluster for EJBCA + initClusterForEJBCA + + # Initialize the database by spinning up an instance of EJBCA infront of a MariaDB database, and then + # create the CA hierarchy and import boilerplate profiles. + initEJBCADatabase + + # Deploy EJBCA with ingress enabled + deployEJBCA + fi +fi diff --git a/test/integration/suites/upstream-authority-ejbca/conf/ejbca/.helmignore b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/.helmignore new file mode 100644 index 00000000000..0e8a0eb36f4 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/test/integration/suites/upstream-authority-ejbca/conf/ejbca/Chart.yaml b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/Chart.yaml new file mode 100644 index 00000000000..5d4aff43592 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: ejbca +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. 
They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/test/integration/suites/upstream-authority-ejbca/conf/ejbca/scripts/ejbca-init.sh b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/scripts/ejbca-init.sh new file mode 100755 index 00000000000..d553dd45154 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/scripts/ejbca-init.sh @@ -0,0 +1,355 @@ +#!/bin/bash + +########### +# General # +########### + +ejbcactl() { + local args=("${@:1}") + + echo "ejbca.sh ${args[*]}" + + if ! 
/opt/keyfactor/bin/ejbca.sh "${args[@]}" ; then + echo "ejbca.sh failed with args: ${args[*]}" + exit 1 + fi + + return 0 +} + + +############## +# Kubernetes # +############## + +createK8sTLSSecret() { + local secret_name=$1 + local cert_file=$2 + local key_file=$3 + + namespace=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace) + secret_url="https://$KUBERNETES_PORT_443_TCP_ADDR:$KUBERNETES_SERVICE_PORT_HTTPS/api/v1/namespaces/$namespace/secrets" + ca_cert_path="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + token=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) + + cert=$(cat $cert_file | base64 | tr -d '\n') + key=$(cat $key_file | base64 | tr -d '\n') + + read -r -d '' PAYLOAD < $tokenProperties +echo "crlSignKey signKey" >> $tokenProperties +echo "keyEncryptKey encryptKey" >> $tokenProperties +echo "testKey testKey" >> $tokenProperties +echo "defaultKey encryptKey" >> $tokenProperties + +root_ca_name="$EJBCA_ROOT_CA_NAME" +if [ -z "$root_ca_name" ]; then + echo "Using default root CA name Root-CA" + root_ca_name="Root-CA" +fi +sub_ca_name="$EJBCA_SUB_CA_NAME" +if [ -z "$sub_ca_name" ]; then + echo "Using default sub CA name Sub-CA" + sub_ca_name="Sub-CA" +fi + +createRootCA "ManagementCA" "$tokenProperties" +createRootCA "$root_ca_name" "$tokenProperties" +createSubCA "$sub_ca_name" "$tokenProperties" "$root_ca_name" + +############################ +# Import staged EEPs & CPs # +############################ + +container_staging_dir="$EJBCA_EEP_CP_STAGE_DIR" +if [ ! 
-s "$container_staging_dir" ]; then + echo "Using default staging directory /opt/keyfactor/stage" + container_staging_dir="/opt/keyfactor/stage" +fi + +# Import end entity profiles from staging area +for file in "$container_staging_dir"/*; do + echo "Importing profile from $file" + ejbcactl ca importprofiles -d "$file" +done + +########################################## +# Create SuperAdmin certificate and role # +########################################## + +common_name="$EJBCA_SUPERADMIN_COMMONNAME" +if [ -z "$common_name" ]; then + echo "Using default common name SuperAdmin" + common_name="SuperAdmin" +fi + +superadmin_secret_name="$EJBCA_SUPERADMIN_SECRET_NAME" +if [ -z "$superadmin_secret_name" ]; then + echo "Using default secret name superadmin-tls" + superadmin_secret_name="superadmin-tls" +fi + +managementca_secret_name="$EJBCA_MANAGEMENTCA_SECRET_NAME" +if [ -z "$managementca_secret_name" ]; then + echo "Using default secret name managementca" + managementca_secret_name="managementca" +fi + +# Create SuperAdmin +ejbcactl ra addendentity \ + --username "SuperAdmin" \ + --dn "CN=$common_name" \ + --caname "ManagementCA" \ + --certprofile "Authentication-2048-3y" \ + --eeprofile "adminInternal" \ + --type 1 \ + --token "PEM" \ + --password "foo123" + +ejbcactl ra setclearpwd SuperAdmin foo123 +ejbcactl batch + +superadmin_cert="/opt/keyfactor/p12/pem/$common_name.pem" +superadmin_key="/opt/keyfactor/p12/pem/$common_name-Key.pem" +createK8sTLSSecret "$superadmin_secret_name" "$superadmin_cert" "$superadmin_key" +managementca_cert="/opt/keyfactor/p12/pem/$common_name-CA.pem" +createK8sOpaqueSecret "$managementca_secret_name" "ca.crt" "$(cat $managementca_cert | base64 | tr -d '\n')" + +# Add a role to allow the SuperAdmin to access the node +ejbcactl roles addrolemember \ + --role 'Super Administrator Role' \ + --caname 'ManagementCA' \ + --with 'WITH_COMMONNAME' \ + --value "$common_name" + +# Enable the /ejbca/ejbca-rest-api endpoint +ejbcactl config 
protocols enable --name "REST Certificate Management" + +######################################### +# Create the in-cluster TLS certificate # +######################################### + +subca_secret_name="$EJBCA_SUBCA_SECRET_NAME" +if [ -z "$subca_secret_name" ]; then + echo "Using default secret name subca" + subca_secret_name="subca" +fi + +reverseproxy_fqdn="$EJBCA_CLUSTER_REVERSEPROXY_FQDN" +if [ -z "$reverseproxy_fqdn" ]; then + echo "Skipping in-cluster reverse proxy TLS config - EJBCA_CLUSTER_REVERSEPROXY_FQDN not set" + exit 0 +fi + +reverseproxy_secret_name="$EJBCA_RP_TLS_SECRET_NAME" +if [ -z "$reverseproxy_secret_name" ]; then + echo "Using default reverseproxy secret name ejbca-reverseproxy-tls" + reverseproxy_secret_name="ejbca-reverseproxy-tls" +fi + +echo "Creating server certificate for $reverseproxy_fqdn" +ejbcactl ra addendentity \ + --username "$reverseproxy_fqdn" \ + --altname dNSName="$reverseproxy_fqdn" \ + --dn "CN=$reverseproxy_fqdn" \ + --caname "Sub-CA" \ + --certprofile "tlsServerAuth" \ + --eeprofile "tlsServerAnyCA" \ + --type 1 \ + --token "PEM" \ + --password "foo123" + +ejbcactl ra setclearpwd "$reverseproxy_fqdn" foo123 +ejbcactl batch + +ls -l "/opt/keyfactor/p12/pem" + +server_cert="/opt/keyfactor/p12/pem/$reverseproxy_fqdn.pem" +server_key="/opt/keyfactor/p12/pem/$reverseproxy_fqdn-Key.pem" +createK8sTLSSecret "$reverseproxy_secret_name" "$server_cert" "$server_key" +subca_cert="/opt/keyfactor/p12/pem/$reverseproxy_fqdn-CA.pem" +createK8sOpaqueSecret "$subca_secret_name" "ca.crt" "$(cat $subca_cert | base64 | tr -d '\n')" diff --git a/test/integration/suites/upstream-authority-ejbca/conf/ejbca/staging/certprofile_Authentication-2048-3y-1510586178.xml b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/staging/certprofile_Authentication-2048-3y-1510586178.xml new file mode 100644 index 00000000000..c0cb8e317d4 --- /dev/null +++
b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/staging/certprofile_Authentication-2048-3y-1510586178.xml @@ -0,0 +1,552 @@ + + + + + version + 51.0 + + + type + 1 + + + certversion + X509v3 + + + encodedvalidity + 3y + + + usecertificatevalidityoffset + false + + + certificatevalidityoffset + -10m + + + useexpirationrestrictionforweekdays + false + + + expirationrestrictionforweekdaysbefore + true + + + expirationrestrictionweekdays + + + true + + + true + + + false + + + false + + + false + + + true + + + true + + + + + allowvalidityoverride + false + + + description + + + + allowextensionoverride + false + + + allowdnoverride + false + + + allowdnoverridebyeei + false + + + allowbackdatedrevokation + false + + + usecertificatestorage + true + + + storecertificatedata + true + + + storesubjectaltname + true + + + usebasicconstrants + false + + + basicconstraintscritical + true + + + usesubjectkeyidentifier + true + + + subjectkeyidentifiercritical + false + + + useauthoritykeyidentifier + true + + + authoritykeyidentifiercritical + false + + + usesubjectalternativename + true + + + subjectalternativenamecritical + false + + + useissueralternativename + false + + + issueralternativenamecritical + false + + + usecrldistributionpoint + true + + + usedefaultcrldistributionpoint + true + + + crldistributionpointcritical + false + + + crldistributionpointuri + + + + usefreshestcrl + false + + + usecadefinedfreshestcrl + false + + + freshestcrluri + + + + crlissuer + + + + usecertificatepolicies + false + + + certificatepoliciescritical + false + + + certificatepolicies + + + + availablekeyalgorithms + + + RSA + + + + + availableeccurves + + + ANY_EC_CURVE + + + + + availablebitlengths + + + 2048 + + + + + minimumavailablebitlength + 2048 + + + maximumavailablebitlength + 2048 + + + signaturealgorithm + SHA256WithRSA + + + usekeyusage + true + + + keyusage + + + true + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + 
false + + + + + allowkeyusageoverride + false + + + keyusagecritical + true + + + useextendedkeyusage + true + + + extendedkeyusage + + + 1.3.6.1.5.5.7.3.2 + + + 1.3.6.1.5.2.3.4 + + + 1.3.6.1.4.1.311.20.2.2 + + + 1.3.6.1.5.5.7.3.21 + + + + + extendedkeyusagecritical + false + + + usedocumenttypelist + false + + + documenttypelistcritical + false + + + documenttypelist + + + + availablecas + + + -1 + + + + + usedpublishers + + + + useocspnocheck + false + + + useldapdnorder + false + + + usecustomdnorder + false + + + usemicrosofttemplate + false + + + microsofttemplate + + + + usecardnumber + false + + + usecnpostfix + false + + + cnpostfix + + + + usesubjectdnsubset + false + + + subjectdnsubset + + + + usesubjectaltnamesubset + false + + + subjectaltnamesubset + + + + usepathlengthconstraint + false + + + pathlengthconstraint + 0 + + + useqcstatement + false + + + usepkixqcsyntaxv2 + false + + + useqcstatementcritical + false + + + useqcstatementraname + + + + useqcsematicsid + + + + useqcetsiqccompliance + false + + + useqcetsisignaturedevice + false + + + useqcetsivaluelimit + false + + + qcetsivaluelimit + 0 + + + qcetsivaluelimitexp + 0 + + + qcetsivaluelimitcurrency + + + + useqcetsiretentionperiod + false + + + qcetsiretentionperiod + 0 + + + useqccustomstring + false + + + qccustomstringoid + + + + qccustomstringtext + + + + qcetsipds + + + + qcetsitype + + + + usecertificatetransparencyincerts + false + + + usecertificatetransparencyinocsp + false + + + usecertificatetransparencyinpublisher + false + + + usesubjectdirattributes + false + + + usenameconstraints + false + + + useauthorityinformationaccess + true + + + caissuers + + + + usedefaultcaissuer + true + + + usedefaultocspservicelocator + true + + + ocspservicelocatoruri + + + + cvcaccessrights + 0 + + + usedcertificateextensions + + + + approvals + + + + useprivkeyusageperiodnotbefore + false + + + useprivkeyusageperiod + false + + + useprivkeyusageperiodnotafter + false + + + 
privkeyusageperiodstartoffset + 0 + + + privkeyusageperiodlength + 63072000 + + + usesingleactivecertificateconstraint + false + + + overridableextensionoids + + + + nonoverridableextensionoids + + + + numofreqapprovals + 1 + + + approvalsettings + + + + approvalProfile + -1 + + + useqccountries + false + + + qccountriestring + + + + usemsobjectsidextension + true + + + usetruncatedsubjectkeyidentifier + false + + + diff --git a/test/integration/suites/upstream-authority-ejbca/conf/ejbca/staging/certprofile_tlsServerAuth-1841776707.xml b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/staging/certprofile_tlsServerAuth-1841776707.xml new file mode 100644 index 00000000000..20e3ea51184 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/staging/certprofile_tlsServerAuth-1841776707.xml @@ -0,0 +1,588 @@ + + + + + version + 51.0 + + + type + 1 + + + certversion + X509v3 + + + encodedvalidity + 10y + + + usecertificatevalidityoffset + false + + + certificatevalidityoffset + -10m + + + useexpirationrestrictionforweekdays + false + + + expirationrestrictionforweekdaysbefore + true + + + expirationrestrictionweekdays + + + true + + + true + + + false + + + false + + + false + + + true + + + true + + + + + allowvalidityoverride + false + + + description + + + + allowextensionoverride + false + + + allowdnoverride + false + + + allowdnoverridebyeei + false + + + allowbackdatedrevokation + false + + + usecertificatestorage + true + + + storecertificatedata + true + + + storesubjectaltname + false + + + usebasicconstrants + false + + + basicconstraintscritical + true + + + usesubjectkeyidentifier + true + + + subjectkeyidentifiercritical + false + + + useauthoritykeyidentifier + true + + + authoritykeyidentifiercritical + false + + + usesubjectalternativename + true + + + subjectalternativenamecritical + false + + + useissueralternativename + false + + + issueralternativenamecritical + false + + + usecrldistributionpoint + true + + + 
usedefaultcrldistributionpoint + true + + + crldistributionpointcritical + false + + + crldistributionpointuri + + + + usefreshestcrl + false + + + usecadefinedfreshestcrl + false + + + freshestcrluri + + + + crlissuer + + + + usecertificatepolicies + false + + + certificatepoliciescritical + false + + + certificatepolicies + + + + availablekeyalgorithms + + + RSA + + + + + availableeccurves + + + ANY_EC_CURVE + + + + + availablebitlengths + + + 2048 + + + 3072 + + + + + minimumavailablebitlength + 2048 + + + maximumavailablebitlength + 3072 + + + signaturealgorithm + SHA256WithRSA + + + usekeyusage + true + + + keyusage + + + true + + + false + + + true + + + false + + + false + + + false + + + false + + + false + + + false + + + + + allowkeyusageoverride + false + + + keyusagecritical + true + + + useextendedkeyusage + true + + + extendedkeyusage + + + 1.3.6.1.5.5.7.3.1 + + + + + extendedkeyusagecritical + false + + + usedocumenttypelist + false + + + documenttypelistcritical + false + + + documenttypelist + + + + availablecas + + + -1 + + + + + usedpublishers + + + + useocspnocheck + false + + + useldapdnorder + false + + + usecustomdnorder + false + + + usemicrosofttemplate + false + + + microsofttemplate + + + + usecardnumber + false + + + usecnpostfix + false + + + cnpostfix + + + + usesubjectdnsubset + false + + + subjectdnsubset + + + + usesubjectaltnamesubset + false + + + subjectaltnamesubset + + + + usepathlengthconstraint + false + + + pathlengthconstraint + 0 + + + useqcstatement + false + + + usepkixqcsyntaxv2 + false + + + useqcstatementcritical + false + + + useqcstatementraname + + + + useqcsematicsid + + + + useqcetsiqccompliance + false + + + useqcetsisignaturedevice + false + + + useqcetsivaluelimit + false + + + qcetsivaluelimit + 0 + + + qcetsivaluelimitexp + 0 + + + qcetsivaluelimitcurrency + + + + useqcetsiretentionperiod + false + + + qcetsiretentionperiod + 0 + + + useqccustomstring + false + + + qccustomstringoid + + + + 
qccustomstringtext + + + + qcetsipds + + + + qcetsitype + + + + usecertificatetransparencyincerts + false + + + usecertificatetransparencyinocsp + false + + + usecertificatetransparencyinpublisher + false + + + usesubjectdirattributes + false + + + usenameconstraints + false + + + useauthorityinformationaccess + true + + + caissuers + + + + usedefaultcaissuer + true + + + usedefaultocspservicelocator + true + + + ocspservicelocatoruri + + + + cvcaccessrights + 0 + + + usedcertificateextensions + + + + approvals + + + + org.cesecore.certificates.ca.ApprovalRequestType + REVOCATION + + -1 + + + + org.cesecore.certificates.ca.ApprovalRequestType + KEYRECOVER + + -1 + + + + org.cesecore.certificates.ca.ApprovalRequestType + ADDEDITENDENTITY + + -1 + + + + + useprivkeyusageperiodnotbefore + false + + + useprivkeyusageperiod + false + + + useprivkeyusageperiodnotafter + false + + + privkeyusageperiodstartoffset + 0 + + + privkeyusageperiodlength + 63072000 + + + usesingleactivecertificateconstraint + false + + + overridableextensionoids + + + + nonoverridableextensionoids + + + + allowcertsnoverride + false + + + usecabforganizationidentifier + false + + + usecustomdnorderldap + false + + + numofreqapprovals + 1 + + + approvalsettings + + + + approvalProfile + -1 + + + useqccountries + false + + + qccountriestring + + + + eabnamespaces + + + + usemsobjectsidextension + true + + + usetruncatedsubjectkeyidentifier + false + + + keyusageforbidencyrptionusageforecc + false + + + diff --git a/test/integration/suites/upstream-authority-ejbca/conf/ejbca/staging/entityprofile_adminInternal-151490904.xml b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/staging/entityprofile_adminInternal-151490904.xml new file mode 100644 index 00000000000..d47d8af1c53 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/staging/entityprofile_adminInternal-151490904.xml @@ -0,0 +1,1529 @@ + + + + + SUBJECTDNFIELDORDER + + + 50000 + + + + + 
SUBJECTALTNAMEFIELDORDER + + + + SUBJECTDIRATTRFIELDORDER + + + + SSH_FIELD_ORDER + + + + version + 18.0 + + + NUMBERARRAY + + + 1 + + + 1 + + + 0 + + + 0 + + + 0 + + + 1 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 1 + + + 0 + + + 0 + + + 1 + + + 1 + + + 1 + + + 1 + + + 1 + + + 1 + + + 0 + + + 0 + + + 1 + + + 1 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 1 + + + 1 + + + 1 + + + 0 + + + 1 + + + 0 + + + 1 + + + 1 + + + 1 + + + 1 + + + 1 + + + 1 + + + 1 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + 
+ 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + + + 0 + + + + 2000000 + true + + + 1000000 + true + + + 3000000 + true + + + 5000000 + false + + + 1 + + + + 2000001 + true + + + 1000001 + true + + + 3000001 + true + + + 5000001 + false + + + 95 + + + + 2000095 + false + + + 1000095 + true + + + 3000095 + true + + + 5000095 + false + + + 96 + 8 + + + 2000096 + false + + + 1000096 + true + + + 3000096 + true + + + 5000096 + false + + + 26 + + + + 2000026 + false + + + 1000026 + true + + + 3000026 + true + + + 5000026 + false + + + 29 + 1510586178 + + + 2000029 + true + + + 1000029 + true + + + 3000029 + true + + + 5000029 + false + + + 30 + 1510586178 + + + 2000030 + true + + + 1000030 + true + + + 3000030 + true + + + 5000030 + false + + + 31 + 1 + + + 2000031 + true + + + 1000031 + true + + + 3000031 + true + + + 5000031 + false + + + 32 + 1;2;3;4 + + + 2000032 + true + + + 1000032 + true + + + 3000032 + true + + + 5000032 + false + + + 33 + + + + 2000033 + false + + + 1000033 + true + + + 3000033 + true + + + 5000033 + false + + + 34 + + + + 2000034 + true + + + 1000034 + false + + + 3000034 + true + + + 5000034 + false + + + 38 + 1999392212 + + + 2000038 + true + + + 1000038 + true + + + 3000038 + true + + + 5000038 + false + + + 37 + 1999392212 + + + 2000037 + true + + + 1000037 + true + + + 3000037 + true + + + 5000037 + false + + + 98 + + + + 2000098 + false + + + 1000098 + false + + + 3000098 + true + + + 5000098 + false + + + 99 + + + + 2000099 + false + + + 1000099 + false + + 
+ 3000099 + true + + + 5000099 + false + + + 97 + + + + 2000097 + false + + + 1000097 + false + + + 3000097 + true + + + 5000097 + false + + + 91 + + + + 2000091 + false + + + 1000091 + false + + + 3000091 + true + + + 5000091 + false + + + 94 + -1 + + + 2000094 + false + + + 1000094 + false + + + 3000094 + false + + + 5000094 + false + + + 93 + -1 + + + 2000093 + false + + + 1000093 + false + + + 3000093 + false + + + 5000093 + false + + + 89 + + + + 2000089 + false + + + 1000089 + false + + + 3000089 + true + + + 5000089 + false + + + 88 + + + + 2000088 + false + + + 1000088 + false + + + 3000088 + true + + + 5000088 + false + + + 87 + + + + 2000087 + false + + + 1000087 + false + + + 3000087 + false + + + 5000087 + false + + + 5 + + + + 2000005 + true + + + 1000005 + true + + + 3000005 + true + + + 5000005 + false + + + 60 + + + + 1000090 + true + + + 90 + 0 + + + 1000002 + false + + + 2 + false + + + 2000002 + false + + + REVERSEFFIELDCHECKS + false + + + ALLOW_MERGEDN_WEBSERVICES + false + + + ALLOW_MULTI_VALUE_RDNS + false + + + 201 + + + + 2000201 + false + + + 3000201 + false + + + 202 + + + + 2000202 + false + + + 3000202 + false + + + 1000092 + false + + + USEEXTENSIONDATA + false + + + PSD2QCSTATEMENT + false + + + 1000035 + false + + + PRINTINGUSE + false + + + USERNOTIFICATIONS + + + + 1000028 + false + + + 2000028 + false + + + 28 + false + + + 2000035 + false + + + 35 + false + + + PRINTINGREQUIRED + false + + + PRINTINGDEFAULT + false + + + ALLOW_MERGEDN + false + + + PROFILETYPE + 1 + + + 110 + + + + 1000086 + false + + + REDACTPII + false + + + diff --git a/test/integration/suites/upstream-authority-ejbca/conf/ejbca/staging/entityprofile_spireIntermediateCA-1440949471.xml b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/staging/entityprofile_spireIntermediateCA-1440949471.xml new file mode 100644 index 00000000000..35fe10fafa6 --- /dev/null +++ 
b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/staging/entityprofile_spireIntermediateCA-1440949471.xml @@ -0,0 +1,980 @@ + + + + + version + 18.0 + + + NUMBERARRAY + + + 1 + + + 1 + + + 0 + + + 0 + + + 0 + + + 0 + + + 1 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 1 + + + 0 + + + 0 + + + 0 + + + 1 + + + 0 + + + 0 + + + 0 + + + 0 + + + 1 + + + 0 + + + 0 + + + 0 + + + 0 + + + 1 + + + 0 + + + 0 + + + 1 + + + 1 + + + 1 + + + 1 + + + 1 + + + 1 + + + 0 + + + 0 + + + 1 + + + 1 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 1 + + + 1 + + + 1 + + + 1 + + + 0 + + + 1 + + + 0 + + + 1 + + + 1 + + + 1 + + + 1 + + + 1 + + + 1 + + + 1 + + + + + SUBJECTDNFIELDORDER + + + 120000 + + + 160000 + + + 60000 + + + + + SUBJECTALTNAMEFIELDORDER + + + 210000 + + + + + SUBJECTDIRATTRFIELDORDER + + + + SSH_FIELD_ORDER + + + + PROFILETYPE + 1 + + + 0 + + + + 2000000 + true + + + 1000000 + true + + + 3000000 + true + + + 5000000 + false + + + 1 + + + + 2000001 + true + + + 1000001 + true + + + 3000001 + true + + + 5000001 + false + + + 95 + + + + 2000095 + false + + + 1000095 + true + + + 3000095 + true + + + 5000095 + false + + + 96 + 8 + + + 2000096 + false + + + 1000096 + true + + + 3000096 + true + + + 5000096 + false + + + 26 + + + + 2000026 + false + + + 1000026 + true + + + 3000026 + true + + + 5000026 + false + + + 29 + 2 + + + 2000029 + true + + + 1000029 + true + + + 3000029 + true + + + 5000029 + false + + + 30 + 2 + + + 2000030 + true + + + 1000030 + true + + + 3000030 + true + + + 5000030 + false + + + 31 + 1 + + + 2000031 + true + + + 1000031 + true + + + 3000031 + true + + + 5000031 + false + + + 32 + 1;2;5;3;4 + + + 2000032 + true + 
+ + 1000032 + true + + + 3000032 + true + + + 5000032 + false + + + 33 + + + + 2000033 + false + + + 1000033 + true + + + 3000033 + true + + + 5000033 + false + + + 34 + + + + 2000034 + true + + + 1000034 + false + + + 3000034 + true + + + 5000034 + false + + + 38 + 1 + + + 2000038 + true + + + 1000038 + true + + + 3000038 + true + + + 5000038 + false + + + 37 + 419280416 + + + 2000037 + true + + + 1000037 + true + + + 3000037 + true + + + 5000037 + false + + + 98 + + + + 2000098 + false + + + 1000098 + false + + + 3000098 + true + + + 5000098 + false + + + 99 + + + + 2000099 + false + + + 1000099 + false + + + 3000099 + true + + + 5000099 + false + + + 97 + + + + 2000097 + false + + + 1000097 + false + + + 3000097 + true + + + 5000097 + false + + + 91 + + + + 2000091 + false + + + 1000091 + false + + + 3000091 + true + + + 5000091 + false + + + 94 + -1 + + + 2000094 + false + + + 1000094 + false + + + 3000094 + true + + + 5000094 + false + + + 93 + -1 + + + 2000093 + false + + + 1000093 + false + + + 3000093 + true + + + 5000093 + false + + + 89 + + + + 2000089 + false + + + 1000089 + false + + + 3000089 + true + + + 5000089 + false + + + 88 + + + + 2000088 + false + + + 1000088 + false + + + 3000088 + true + + + 5000088 + false + + + 87 + + + + 2000087 + false + + + 1000087 + false + + + 3000087 + true + + + 5000087 + false + + + 86 + 7 + + + 2000086 + false + + + 1000086 + false + + + 3000086 + false + + + 5000086 + false + + + 3000201 + true + + + 3000202 + true + + + 3000203 + true + + + 12 + + + + 2000012 + false + + + 1000012 + true + + + 3000012 + true + + + 5000012 + false + + + 16 + + + + 2000016 + false + + + 1000016 + true + + + 3000016 + true + + + 5000016 + false + + + 21 + + + + 2000021 + false + + + 1000021 + true + + + 3000021 + true + + + 5000021 + false + + + 1000090 + true + + + 90 + 0 + + + 1000002 + false + + + 2 + false + + + 2000002 + false + + + 110 + + + + REVERSEFFIELDCHECKS + false + + + ALLOW_MERGEDN + false + + + ALLOW_MULTI_VALUE_RDNS 
+ false + + + 1000092 + false + + + USEEXTENSIONDATA + false + + + PSD2QCSTATEMENT + false + + + 1000028 + false + + + REDACTPII + false + + + 1000035 + false + + + USERNOTIFICATIONS + + + + 2000028 + false + + + 28 + false + + + 2000035 + false + + + 35 + false + + + 6 + + + + 2000006 + false + + + 1000006 + true + + + 3000006 + true + + + 5000006 + false + + + diff --git a/test/integration/suites/upstream-authority-ejbca/conf/ejbca/staging/entityprofile_tlsServerAnyCA-327363278.xml b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/staging/entityprofile_tlsServerAnyCA-327363278.xml new file mode 100644 index 00000000000..31610d6bf2e --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/staging/entityprofile_tlsServerAnyCA-327363278.xml @@ -0,0 +1,934 @@ + + + + + version + 18.0 + + + NUMBERARRAY + + + 1 + + + 1 + + + 0 + + + 0 + + + 0 + + + 1 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 1 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 1 + + + 0 + + + 0 + + + 1 + + + 1 + + + 1 + + + 1 + + + 1 + + + 1 + + + 0 + + + 0 + + + 1 + + + 1 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + 1 + + + 1 + + + 1 + + + 1 + + + 0 + + + 1 + + + 0 + + + 1 + + + 1 + + + 1 + + + 1 + + + 1 + + + 1 + + + 1 + + + + + SUBJECTDNFIELDORDER + + + 50000 + + + + + SUBJECTALTNAMEFIELDORDER + + + 180000 + + + + + SUBJECTDIRATTRFIELDORDER + + + + SSH_FIELD_ORDER + + + + PROFILETYPE + 1 + + + 0 + + + + 2000000 + true + + + 1000000 + true + + + 3000000 + true + + + 5000000 + false + + + 1 + + + + 2000001 + true + + + 1000001 + true + + + 3000001 + true + + + 
5000001 + false + + + 95 + + + + 2000095 + false + + + 1000095 + true + + + 3000095 + true + + + 5000095 + false + + + 96 + 8 + + + 2000096 + false + + + 1000096 + true + + + 3000096 + true + + + 5000096 + false + + + 5 + + + + 2000005 + true + + + 1000005 + true + + + 3000005 + true + + + 5000005 + false + + + 26 + + + + 2000026 + false + + + 1000026 + true + + + 3000026 + true + + + 5000026 + false + + + 29 + 9 + + + 2000029 + true + + + 1000029 + true + + + 3000029 + true + + + 5000029 + false + + + 30 + 9;781718050;1841776707 + + + 2000030 + true + + + 1000030 + true + + + 3000030 + true + + + 5000030 + false + + + 31 + 1 + + + 2000031 + true + + + 1000031 + true + + + 3000031 + true + + + 5000031 + false + + + 32 + 1;2;5;3;4 + + + 2000032 + true + + + 1000032 + true + + + 3000032 + true + + + 5000032 + false + + + 33 + + + + 2000033 + false + + + 1000033 + true + + + 3000033 + true + + + 5000033 + false + + + 34 + + + + 2000034 + true + + + 1000034 + false + + + 3000034 + true + + + 5000034 + false + + + 38 + 1999392212;-1090145480;-913475458 + + + 2000038 + true + + + 1000038 + true + + + 3000038 + true + + + 5000038 + false + + + 37 + 1999392212 + + + 2000037 + true + + + 1000037 + true + + + 3000037 + true + + + 5000037 + false + + + 98 + + + + 2000098 + false + + + 1000098 + false + + + 3000098 + true + + + 5000098 + false + + + 99 + + + + 2000099 + false + + + 1000099 + false + + + 3000099 + true + + + 5000099 + false + + + 97 + + + + 2000097 + false + + + 1000097 + false + + + 3000097 + true + + + 5000097 + false + + + 91 + + + + 2000091 + false + + + 1000091 + false + + + 3000091 + true + + + 5000091 + false + + + 94 + -1 + + + 2000094 + false + + + 1000094 + false + + + 3000094 + true + + + 5000094 + false + + + 93 + -1 + + + 2000093 + false + + + 1000093 + false + + + 3000093 + true + + + 5000093 + false + + + 89 + + + + 2000089 + false + + + 1000089 + false + + + 3000089 + true + + + 5000089 + false + + + 88 + + + + 2000088 + false + + + 1000088 + 
false + + + 3000088 + true + + + 5000088 + false + + + 87 + + + + 2000087 + false + + + 1000087 + false + + + 3000087 + true + + + 5000087 + false + + + 86 + 7 + + + 2000086 + false + + + 1000086 + false + + + 3000086 + false + + + 5000086 + false + + + 3000201 + true + + + 3000202 + true + + + 3000203 + true + + + 18 + + + + 2000018 + false + + + 1000018 + true + + + 3000018 + true + + + 5000018 + false + + + 1000090 + true + + + 90 + 0 + + + 1000002 + false + + + 2 + false + + + 2000002 + false + + + 110 + + + + REVERSEFFIELDCHECKS + false + + + ALLOW_MERGEDN + false + + + ALLOW_MULTI_VALUE_RDNS + false + + + 1000092 + false + + + USEEXTENSIONDATA + false + + + PSD2QCSTATEMENT + false + + + REDACTPII + false + + + 1000035 + false + + + USERNOTIFICATIONS + + + + 1000028 + false + + + 2000028 + false + + + 28 + false + + + 2000035 + false + + + 35 + false + + + diff --git a/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/_helpers.tpl b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/_helpers.tpl new file mode 100644 index 00000000000..f1df343ef02 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/_helpers.tpl @@ -0,0 +1,76 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "ejbca.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "ejbca.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ejbca.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "ejbca.labels" -}} +helm.sh/chart: {{ include "ejbca.chart" . }} +{{ include "ejbca.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{- define "ejbca.databaseLabels" -}} +helm.sh/chart: {{ include "ejbca.chart" . }} +{{ include "ejbca.databaseSelectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "ejbca.selectorLabels" -}} +app.kubernetes.io/name: {{ include "ejbca.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{- define "ejbca.databaseSelectorLabels" -}} +app.kubernetes.io/name: mariadb +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "ejbca.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "ejbca.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/database-service.yaml b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/database-service.yaml new file mode 100644 index 00000000000..1a4ab92a4c9 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/database-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.database.service.name }} + labels: + {{- include "ejbca.databaseLabels" . | nindent 4 }} +spec: + type: {{ .Values.database.service.type }} + ports: + - name: tcp-db-port + port: {{ .Values.database.service.port }} + targetPort: {{ .Values.database.service.port }} + selector: + {{- include "ejbca.databaseSelectorLabels" . | nindent 4 }} \ No newline at end of file diff --git a/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/database-statefulset.yaml b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/database-statefulset.yaml new file mode 100644 index 00000000000..94dc75b4f9e --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/database-statefulset.yaml @@ -0,0 +1,47 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: mariadb-statefulset + labels: + {{- include "ejbca.databaseLabels" . | nindent 4 }} +spec: + serviceName: {{ .Values.database.service.name}} + replicas: 1 + selector: + matchLabels: + {{- include "ejbca.databaseSelectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "ejbca.databaseSelectorLabels" . | nindent 8 }} + spec: + {{- with .Values.database.image.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + containers: + - name: mariadb + image: {{ .Values.database.image.repository }}:{{ .Values.database.image.tag }} + ports: + - containerPort: {{ .Values.database.service.port }} + name: mariadb + env: + - name: MARIADB_ROOT_PASSWORD + value: {{ .Values.database.rootPassword }} + - name: MARIADB_DATABASE + value: {{ .Values.database.name }} + - name: MARIADB_USER + value: {{ .Values.database.username }} + - name: MARIADB_PASSWORD + value: {{ .Values.database.password }} + volumeMounts: + - name: datadir + mountPath: /var/lib/mysql + volumeClaimTemplates: + - metadata: + name: datadir + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 10G \ No newline at end of file diff --git a/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/ejbca-deployment.yaml b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/ejbca-deployment.yaml new file mode 100644 index 00000000000..45ca5c977f2 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/ejbca-deployment.yaml @@ -0,0 +1,151 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "ejbca.fullname" . }} + labels: + {{- include "ejbca.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.ejbca.replicaCount }} + selector: + matchLabels: + {{- include "ejbca.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.ejbca.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "ejbca.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.ejbca.image.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "ejbca.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.ejbca.image.repository }}:{{ .Values.ejbca.image.tag }}" + imagePullPolicy: {{ .Values.ejbca.image.pullPolicy }} + ports: + {{- range .Values.ejbca.containerPorts }} + - name: {{ .name }} + containerPort: {{ .containerPort }} + protocol: {{ .protocol }} + {{- end }} + startupProbe: + httpGet: + port: 8081 + path: /ejbca/publicweb/healthcheck/ejbcahealth + failureThreshold: 1000 # 50 * 2 seconds + 45-second delay gives 145 seconds for EJBCA to start + periodSeconds: 2 + initialDelaySeconds: 45 + livenessProbe: + httpGet: + port: 8081 + path: /ejbca/publicweb/healthcheck/ejbcahealth + env: + - name: DATABASE_JDBC_URL + value: "jdbc:mariadb://{{ .Values.database.service.name }}:{{ .Values.database.service.port }}/ejbca?characterEncoding=utf8" + - name: DATABASE_USER + value: {{ .Values.database.username }} + - name: DATABASE_PASSWORD + value: {{ .Values.database.password }} + - name: PROXY_HTTP_BIND + value: "{{ .Values.ejbca.proxyHttpBind }}" + - name: LOG_AUDIT_TO_DB + value: "{{ .Values.ejbca.logAuditToDatabase }}" + {{ if .Values.ejbca.ingress.enabled }} + - name: HTTPSERVER_HOSTNAME + value: "{{ index .Values.ejbca.ingress.hosts 0 "host" }}" + {{ end }} + - name: TLS_SETUP_ENABLED + value: "simple" + {{ if .Values.ejbca.extraEnvironmentVars }} + {{- range .Values.ejbca.extraEnvironmentVars }} + - name: {{ .name }} + value: {{ .value }} + {{- end }} + {{- end }} + volumeMounts: + {{- range .Values.ejbca.volumes }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- if .Values.ejbca.reverseProxy.enabled }} + - name: httpd + image: "{{ .Values.ejbca.reverseProxy.image.repository }}:{{ .Values.ejbca.reverseProxy.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ 
.Values.ejbca.reverseProxy.image.pullPolicy }} + {{- if .Values.ejbca.reverseProxy.service.ports }} + ports: + {{- range .Values.ejbca.reverseProxy.service.ports }} + - name: {{ .name }} + containerPort: {{ .port }} + protocol: {{ .protocol }} + {{- end }} + {{- end }} + readinessProbe: + tcpSocket: + port: 8080 + volumeMounts: + - name: httpd-configmap + mountPath: /usr/local/apache2/conf/ + {{- if .Values.ejbca.reverseProxy.service.ports }} + {{- range .Values.ejbca.reverseProxy.service.ports }} + {{- if .authCASecretName }} + - name: {{ .authCASecretName }} + mountPath: {{ .baseCaCertDir }} + {{- end }} + {{- if .tlsSecretName }} + - name: {{ .tlsSecretName }} + mountPath: {{ .baseCertDir }} + {{- end }} + {{- end }} + {{- end }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + volumes: + {{- range .Values.ejbca.volumes }} + - name: {{ .name }} + configMap: + name: {{ .configMapName }} + {{- end }} + {{- if .Values.ejbca.reverseProxy.enabled }} + - name: httpd-configmap + configMap: + name: httpd-configmap + {{- if .Values.ejbca.reverseProxy.service.ports }} + {{- range .Values.ejbca.reverseProxy.service.ports }} + {{- if .authCASecretName }} + - name: {{ .authCASecretName }} + secret: + secretName: {{ .authCASecretName }} + {{- end }} + {{- if .tlsSecretName }} + - name: {{ .tlsSecretName }} + secret: + secretName: {{ .tlsSecretName }} + {{- end }} + {{- end }} + {{- end }} + {{- end}} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + diff --git a/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/ejbca-reverseproxyconfig.yaml b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/ejbca-reverseproxyconfig.yaml new file mode 100644 index 00000000000..1d947ae4c75 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/ejbca-reverseproxyconfig.yaml @@ -0,0 +1,113 @@ +{{- if .Values.ejbca.reverseProxy.enabled -}} +apiVersion: v1 +data: + httpd.conf: |+ + # Load required modules + LoadModule mpm_event_module modules/mod_mpm_event.so + LoadModule headers_module modules/mod_headers.so + LoadModule authz_core_module modules/mod_authz_core.so + LoadModule access_compat_module modules/mod_access_compat.so + LoadModule log_config_module modules/mod_log_config.so + LoadModule proxy_module modules/mod_proxy.so + LoadModule proxy_http_module modules/mod_proxy_http.so + LoadModule unixd_module modules/mod_unixd.so + LoadModule filter_module modules/mod_filter.so + LoadModule substitute_module modules/mod_substitute.so + LoadModule rewrite_module modules/mod_rewrite.so + LoadModule socache_shmcb_module modules/mod_socache_shmcb.so + LoadModule ssl_module modules/mod_ssl.so + + # Set default connection behavior + MaxKeepAliveRequests 1000 + KeepAlive On + KeepAliveTimeout 180 + + # Set basic security for Unix platform + + User daemon + Group daemon + + + # Set log configuration + ErrorLog /proc/self/fd/2 + LogLevel info + + LogFormat "%h %A:%p %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined + LogFormat "%h %A:%p %l %u %t \"%r\" %>s %b" common + CustomLog /proc/self/fd/1 common + + + ServerRoot "/usr/local/apache2" + Listen 8080 + {{- range .Values.ejbca.reverseProxy.service.ports }} + Listen {{ .port }} + {{- end }} + + + AllowOverride none + Require all denied + + + {{- range .Values.ejbca.reverseProxy.service.ports }} + + # Disallow any HTTP method that is not HEAD, GET or POST + 
RewriteEngine On + RewriteCond %{REQUEST_METHOD} !^(HEAD|GET|POST)$ [NC] + RewriteRule .* - [F,L] + + # Allow encoded slashes for OCSP GET + AllowEncodedSlashes On + + {{- if .tlsSecretName }} + SSLEngine On + SSLProtocol all -SSLv2 -SSLv3 -TLSv1 +TLSv1.2 -TLSv1.3 + SSLCertificateFile "{{ .baseCertDir }}/tls.crt" + SSLCertificateKeyFile "{{ .baseCertDir }}/tls.key" + RequestHeader set X-Forwarded-Proto "https" + {{- end }} + + {{ if .authCASecretName }} + SSLCACertificateFile "{{ .baseCaCertDir }}/ca.crt" + + + SSLVerifyClient optional + SSLOptions +ExportCertData +StdEnvVars + RequestHeader set SSL_CLIENT_CERT "%{SSL_CLIENT_CERT}s" + + + + SSLVerifyClient optional + SSLOptions +ExportCertData +StdEnvVars + RequestHeader set SSL_CLIENT_CERT "%{SSL_CLIENT_CERT}s" + + + + SSLVerifyClient optional + SSLOptions +ExportCertData +StdEnvVars + RequestHeader set SSL_CLIENT_CERT "%{SSL_CLIENT_CERT}s" + + {{- end }} + + ProxyPass /ejbca/ http://localhost:{{ .targetPort }}/ejbca/ keepalive=On ping=500ms retry=1 timeout=300 + + # Add ProxyPass for EST and .well-known URLs + ProxyPass /.well-known/ http://localhost:{{ .targetPort }}/.well-known/ keepalive=On ping=500ms retry=1 timeout=300 + + {{- end }} + + # + # # Disallow any HTTP method that is not HEAD, GET or POST + # RewriteEngine On + # RewriteCond %{REQUEST_METHOD} !^(HEAD|GET|POST)$ [NC] + # RewriteRule .* - [F,L] + + # # Allow encoded slashes for OCSP GET + # AllowEncodedSlashes On + + # # Proxy http requests from K8s ingress to EJBCA via port 8082 + # ProxyPass /ejbca/ http://localhost:8009/ejbca/ keepalive=On ping=500ms retry=1 timeout=300 + # +kind: ConfigMap +metadata: + name: httpd-configmap +{{- end -}} diff --git a/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/ejbca-service.yaml b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/ejbca-service.yaml new file mode 100644 index 00000000000..5af9eb5f985 --- /dev/null +++ 
b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/ejbca-service.yaml @@ -0,0 +1,21 @@ +{{- $svcType := .Values.ejbca.service.type }} +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.ejbca.service.name }} + labels: + {{- include "ejbca.labels" . | nindent 4 }} +spec: + type: {{ .Values.ejbca.service.type }} + ports: + {{- range .Values.ejbca.service.ports }} + - name: {{ .name }} + port: {{ .port }} + targetPort: {{ .targetPort}} + protocol: {{ .protocol }} + {{- if contains "NodePort" $svcType }} + nodePort: {{ .nodePort }} + {{- end }} + {{- end }} + selector: + {{- include "ejbca.selectorLabels" . | nindent 4 }} diff --git a/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/reverseproxy-service.yaml b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/reverseproxy-service.yaml new file mode 100644 index 00000000000..d8def2f72f9 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/reverseproxy-service.yaml @@ -0,0 +1,23 @@ +{{- if .Values.ejbca.reverseProxy.enabled -}} +{{- $svcType := .Values.ejbca.reverseProxy.service.type }} +apiVersion: v1 +kind: Service +metadata: + name: {{ .Values.ejbca.reverseProxy.service.name }} + labels: + {{- include "ejbca.labels" . | nindent 4 }} +spec: + type: {{ .Values.ejbca.reverseProxy.service.type }} + ports: + {{- range .Values.ejbca.reverseProxy.service.ports }} + - name: {{ .name }} + port: {{ .port }} + targetPort: {{ .port}} + protocol: {{ .protocol }} + {{- if contains "NodePort" $svcType }} + nodePort: {{ .nodePort }} + {{- end }} + {{- end }} + selector: + {{- include "ejbca.selectorLabels" . 
| nindent 4 }} +{{- end }} diff --git a/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/serviceaccount.yaml b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/serviceaccount.yaml new file mode 100644 index 00000000000..86a7148fdbf --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/templates/serviceaccount.yaml @@ -0,0 +1,47 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ejbca.serviceAccountName" . }} + labels: + {{- include "ejbca.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + {{- include "ejbca.labels" . | nindent 4 }} + name: {{ include "ejbca.name" . }}-secret-role + namespace: {{ .Release.Namespace }} +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - create + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + {{- include "ejbca.labels" . | nindent 4 }} + name: {{ include "ejbca.name" . }}-secret-rolebinding + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "ejbca.name" . }}-secret-role +subjects: + - kind: ServiceAccount + name: {{ include "ejbca.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/test/integration/suites/upstream-authority-ejbca/conf/ejbca/values.yaml b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/values.yaml new file mode 100644 index 00000000000..4bb4dd2b6fd --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/conf/ejbca/values.yaml @@ -0,0 +1,128 @@ +# Default values for ejbca. +# This is a YAML-formatted file. 
+ +nameOverride: "" +fullnameOverride: "" + +ejbca: + replicaCount: 1 + + podAnnotations: {} + + image: + repository: keyfactor/ejbca-ce + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "latest" + imagePullSecrets: [] + + containerPorts: + - name: ejbca-http + containerPort: 8081 + protocol: TCP + - name: ejbca-https + containerPort: 8082 + protocol: TCP + + proxyHttpBind: "0.0.0.0" + logAuditToDatabase: "true" + + service: + name: ejbca-service + type: NodePort + ports: + - name: ejbca-http + port: 8081 + targetPort: ejbca-http + nodePort: 30080 + protocol: TCP + - name: ejbca-https + port: 8082 + targetPort: ejbca-https + nodePort: 30443 + protocol: TCP + + reverseProxy: + enabled: false + image: + repository: httpd + pullPolicy: IfNotPresent + tag: "2.4" + + service: + name: ejbca-rp-service + type: ClusterIP + ports: + - name: ejbca-rp-https + port: 8443 + targetPort: 8082 + protocol: TCP + + authCASecretName: managementca + tlsSecretName: ejbca-reverseproxy-tls + baseCertDir: '/usr/local/certs' + baseCaCertDir: '/usr/local/cacerts' + + volumes: [] + # - name: ejbca-eep-admininternal + # configMapName: ejbca-eep-admininternal + # mountPath: /opt/keyfactor/stage/admininternal + + extraEnvironmentVars: [] + # - name: EJBCA_EXTRA_ENV + # value: "EJBCA_EXTRA_ENV" + +database: + replicaCount: 1 + + name: ejbca + username: ejbca + password: ejbca + rootPassword: ejbca + + image: + repository: mariadb + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "latest" + imagePullSecrets: [] + + service: + name: ejbca-database-service + type: ClusterIP + port: 3306 + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +resources: {} + # limits: + # cpu: "16" + # memory: "4096Mi" + # requests: + # cpu: 1000m + # memory: "2048Mi" + +nodeSelector: {} + +tolerations: [] + +affinity: {} + diff --git a/test/integration/suites/upstream-authority-ejbca/conf/server/kustomization.yaml b/test/integration/suites/upstream-authority-ejbca/conf/server/kustomization.yaml new file mode 100644 index 00000000000..c87a9a25d09 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/conf/server/kustomization.yaml @@ -0,0 +1,10 @@ +# kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +# list of Resource Config to be Applied +resources: + - spire-server.yaml + +# namespace to deploy all Resources to +namespace: spire diff --git a/test/integration/suites/upstream-authority-ejbca/conf/server/spire-server.yaml b/test/integration/suites/upstream-authority-ejbca/conf/server/spire-server.yaml new file mode 100644 index 00000000000..27a9f205e19 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/conf/server/spire-server.yaml @@ -0,0 +1,122 @@ +# ConfigMap containing the SPIRE server configuration. 
+apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-server + namespace: spire +data: + server.conf: | + server { + bind_address = "0.0.0.0" + bind_port = "8081" + trust_domain = "example.org" + data_dir = "/run/spire/data" + log_level = "DEBUG" + default_x509_svid_ttl = "1h" + ca_ttl = "12h" + ca_subject { + country = ["US"] + organization = ["SPIFFE"] + common_name = "" + } + } + + plugins { + DataStore "sql" { + plugin_data { + database_type = "sqlite3" + connection_string = "/run/spire/data/datastore.sqlite3" + } + } + + KeyManager "memory" { + plugin_data = {} + } + + UpstreamAuthority "ejbca" { + plugin_data = { + hostname = "ejbca-rp-service.ejbca.svc.cluster.local:8443" + ca_cert_path = "/run/spire/ejbca/ca/ca.crt" + client_cert_path = "/run/spire/ejbca/mtls/tls.crt" + client_cert_key_path = "/run/spire/ejbca/mtls/tls.key" + ca_name = "Sub-CA" + end_entity_profile_name = "spireIntermediateCA" + certificate_profile_name = "SUBCA" + end_entity_name = "" + account_binding_id = "" + } + } + } + + health_checks { + listener_enabled = true + bind_address = "0.0.0.0" + bind_port = "8080" + live_path = "/live" + ready_path = "/ready" + } + +--- + +# This is the Deployment for the SPIRE server. It waits for SPIRE database to +# initialize and uses the SPIRE healthcheck command for liveness/readiness +# probes. 
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: spire-server + namespace: spire + labels: + app: spire-server +spec: + replicas: 1 + selector: + matchLabels: + app: spire-server + template: + metadata: + namespace: spire + labels: + app: spire-server + spec: + shareProcessNamespace: true + containers: + - name: spire-server + image: spire-server:latest-local + imagePullPolicy: Never + args: ["-config", "/run/spire/config/server.conf"] + ports: + - containerPort: 8081 + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + - name: superadmin-tls + mountPath: /run/spire/ejbca/mtls + readOnly: true + - name: subca + mountPath: /run/spire/ejbca/ca + readOnly: true + livenessProbe: + httpGet: + path: /live + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + volumes: + - name: spire-config + configMap: + name: spire-server + - name: superadmin-tls + secret: + secretName: superadmin-tls + - name: subca + secret: + secretName: subca + diff --git a/test/integration/suites/upstream-authority-ejbca/init-kubectl b/test/integration/suites/upstream-authority-ejbca/init-kubectl new file mode 100644 index 00000000000..c27940d7a99 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/init-kubectl @@ -0,0 +1,7 @@ +#!/bin/bash + +KUBECONFIG="${RUNDIR}/kubeconfig" +if [ ! 
-f "${RUNDIR}/kubeconfig" ]; then + ./bin/kind get kubeconfig --name=ejbca-test > "${RUNDIR}/kubeconfig" +fi +export KUBECONFIG diff --git a/test/integration/suites/upstream-authority-ejbca/teardown b/test/integration/suites/upstream-authority-ejbca/teardown new file mode 100755 index 00000000000..7f09a046354 --- /dev/null +++ b/test/integration/suites/upstream-authority-ejbca/teardown @@ -0,0 +1,10 @@ +#!/bin/bash + +source init-kubectl + +if [ -z "$SUCCESS" ]; then + ./bin/kubectl -nspire logs deployment/spire-server --all-containers || true +fi + +export KUBECONFIG= +./bin/kind delete cluster --name ejbca-test diff --git a/test/integration/test-one.sh b/test/integration/test-one.sh index 60e23e304d4..7b8a8283f60 100755 --- a/test/integration/test-one.sh +++ b/test/integration/test-one.sh @@ -83,8 +83,7 @@ cp -R "${TESTDIR}"/* "${RUNDIR}/" run-step() { local script="$1" if [ ! -x "$script" ]; then - log-warn "skipping \"$script\"; not executable" - return + fail-now "Failing: \"$script\" is not executable" fi log-debug "executing $(basename "$script")..." 
diff --git a/test/plugintest/load.go b/test/plugintest/load.go index 211e7fe93ca..942ab32a75a 100644 --- a/test/plugintest/load.go +++ b/test/plugintest/load.go @@ -6,7 +6,6 @@ import ( "testing" "github.com/sirupsen/logrus" - "github.com/sirupsen/logrus/hooks/test" "github.com/spiffe/spire/pkg/common/catalog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -26,7 +25,7 @@ type Plugin interface { func Load(t *testing.T, builtIn catalog.BuiltIn, pluginFacade catalog.Facade, options ...Option) Plugin { conf := &config{ builtInConfig: catalog.BuiltInConfig{ - Log: nullLogger(), + Log: testLogger(t), }, } for _, opt := range options { @@ -70,7 +69,24 @@ func Load(t *testing.T, builtIn catalog.BuiltIn, pluginFacade catalog.Facade, op } } -func nullLogger() logrus.FieldLogger { - log, _ := test.NewNullLogger() +func testLogger(t *testing.T) logrus.FieldLogger { + log := logrus.New() + log.SetOutput(io.Discard) + log.AddHook(logHook{t: t}) return log } + +type logHook struct{ t *testing.T } + +func (h logHook) Levels() []logrus.Level { + return logrus.AllLevels +} + +func (h logHook) Fire(e *logrus.Entry) error { + s, err := e.String() + if err != nil { + return err + } + h.t.Logf("log: %s\n", s) + return nil +} diff --git a/test/spiretest/socketapi_windows.go b/test/spiretest/socketapi_windows.go index 30c751aa8f9..0a6eaf715f8 100644 --- a/test/spiretest/socketapi_windows.go +++ b/test/spiretest/socketapi_windows.go @@ -39,7 +39,7 @@ func StartGRPCOnNamedPipeServer(t *testing.T, pipeName string, registerFn func(s } func ServeGRPCServerOnNamedPipe(t *testing.T, server *grpc.Server, pipeName string) net.Addr { - listener, err := winio.ListenPipe(fmt.Sprintf(`\\.\`+filepath.Join("pipe", pipeName)), nil) + listener, err := winio.ListenPipe(`\\.\`+filepath.Join("pipe", pipeName), nil) require.NoError(t, err) ServeGRPCServerOnListener(t, server, listener) return namedpipe.AddrFromName(namedpipe.GetPipeName(listener.Addr().String())) diff --git 
a/test/testca/ca.go b/test/testca/ca.go index bfb99047420..442af982a29 100644 --- a/test/testca/ca.go +++ b/test/testca/ca.go @@ -149,6 +149,18 @@ func (ca *CA) JWTBundle() *jwtbundle.Bundle { return jwtbundle.FromJWTAuthorities(ca.td, ca.JWTAuthorities()) } +func (ca *CA) GetSubjectKeyID() string { + return x509util.SubjectKeyIDToString(ca.cert.SubjectKeyId) +} + +func (ca *CA) GetUpstreamAuthorityID() string { + authorityKeyID := ca.cert.AuthorityKeyId + if len(authorityKeyID) == 0 { + return "" + } + return x509util.SubjectKeyIDToString(authorityKeyID) +} + func (ca *CA) chain(includeRoot bool) []*x509.Certificate { chain := []*x509.Certificate{} next := ca @@ -183,7 +195,10 @@ func CreateCACertificate(tb testing.TB, parent *x509.Certificate, parentKey cryp if parent == nil { parent = tmpl parentKey = key + } else { + tmpl.AuthorityKeyId = parent.SubjectKeyId } + return CreateCertificate(tb, tmpl, parent, key.Public(), parentKey), key } diff --git a/test/util/cert_generation.go b/test/util/cert_generation.go index b5290b73a9f..0df638ef684 100644 --- a/test/util/cert_generation.go +++ b/test/util/cert_generation.go @@ -12,6 +12,7 @@ import ( "time" "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/spiffe/spire/pkg/common/x509util" "github.com/spiffe/spire/test/clock" ) @@ -105,6 +106,11 @@ func Sign(req, parent *x509.Certificate, signerPrivateKey any) (*x509.Certificat return nil, nil, err } publicKey = key.Public() + skID, err := x509util.GetSubjectKeyID(publicKey) + if err != nil { + return nil, nil, err + } + req.SubjectKeyId = skID } if signerPrivateKey == nil {