diff --git a/Makefile b/Makefile index 45ccf74f7..feb2bf6f8 100644 --- a/Makefile +++ b/Makefile @@ -5,6 +5,7 @@ KUBECONFIG ?= ~/.kube/config git_tag := $(shell git describe --tags --abbrev=0 2>/dev/null || echo 'untagged') git_commit := $(shell git rev-parse HEAD 2>/dev/null | cut -c1-7) +go_root := $(shell go env GOROOT) RPAAS_OPERATOR_VERSION ?= $(git_tag)/$(git_commit) GO_LDFLAGS ?= -X=github.com/tsuru/rpaas-operator/version.Version=$(RPAAS_OPERATOR_VERSION) @@ -34,7 +35,7 @@ local: deploy/crds operator-sdk up local --go-ldflags $(GO_LDFLAGS) generate: - operator-sdk generate k8s + GOROOT=$(go_root) operator-sdk generate k8s build: build/plugin/rpaasv2 operator-sdk build $(IMAGE_OPERATOR):$(TAG) --go-build-args "-ldflags $(GO_LDFLAGS)" diff --git a/deploy/role.yaml b/deploy/role.yaml index b98b5b368..7a4b184df 100644 --- a/deploy/role.yaml +++ b/deploy/role.yaml @@ -60,3 +60,27 @@ rules: - horizontalpodautoscalers verbs: - '*' +- apiGroups: + - batch + resources: + - jobs + - cronjobs + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: rpaas-cache-syncer +rules: +- apiGroups: [""] + resources: + - pods + verbs: + - get + - list +- apiGroups: [""] + resources: + - pods/exec + verbs: + - create diff --git a/deploy/role_binding.yaml b/deploy/role_binding.yaml index aa2c58010..e50499a7b 100644 --- a/deploy/role_binding.yaml +++ b/deploy/role_binding.yaml @@ -10,3 +10,16 @@ roleRef: kind: ClusterRole name: rpaas-operator apiGroup: rbac.authorization.k8s.io +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: rpaas-cache-snapshot-cronjob +subjects: +- kind: ServiceAccount + name: rpaas-cache-snapshot + namespace: rpaas-operator-integration +roleRef: + kind: ClusterRole + name: rpaas-cache-syncer + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/service_account.yaml b/deploy/service_account.yaml index 9b0b0dde3..aa5c5f3fd 100644 --- a/deploy/service_account.yaml +++ b/deploy/service_account.yaml @@ -2,3 +2,8 @@ apiVersion: v1 kind: ServiceAccount metadata: name: rpaas-operator +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rpaas-cache-snapshot diff --git a/go.mod b/go.mod index ac10bd97f..d93ec1c48 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.4.0 github.com/stretchr/testify v1.4.0 - github.com/tsuru/nginx-operator v0.5.0 + github.com/tsuru/nginx-operator v0.5.2 github.com/urfave/cli/v2 v2.0.0 github.com/willf/bitset v1.1.10 k8s.io/api v0.0.0 diff --git a/go.sum b/go.sum index 9c3d872ee..024b2e8d8 100644 --- a/go.sum +++ b/go.sum @@ -611,8 +611,8 @@ github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1C github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tsuru/config v0.0.0-20180418191556-87403ee7da02/go.mod h1:bilf/jr5AGl9raMFnhM9hsp+ms6PK752UF89ODoRHW4= -github.com/tsuru/nginx-operator v0.5.0 h1:oWSu/9WHm+wKbvJSZn0NebQZCw2R0R5HYgGNZcasp+w= -github.com/tsuru/nginx-operator v0.5.0/go.mod h1:hvXj+bM5dUsnOt/PQE1ktHnLxM1ucnhKM0zOdHZyzsg= +github.com/tsuru/nginx-operator v0.5.2 h1:5i+0EKOnvVrkXEiXIxzKT75h4y80dj50kiqZwqlyeUk= +github.com/tsuru/nginx-operator v0.5.2/go.mod h1:hvXj+bM5dUsnOt/PQE1ktHnLxM1ucnhKM0zOdHZyzsg= github.com/ugorji/go v1.1.4/go.mod 
h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli/v2 v2.0.0 h1:+HU9SCbu8GnEUFtIBfuUNXN39ofWViIEJIp6SURMpCg= diff --git a/internal/pkg/rpaas/k8s.go b/internal/pkg/rpaas/k8s.go index 6c0295dbd..8aa1c1002 100644 --- a/internal/pkg/rpaas/k8s.go +++ b/internal/pkg/rpaas/k8s.go @@ -112,7 +112,7 @@ func (m *k8sRpaasManager) CreateInstance(ctx context.Context, args CreateArgs) e } setDescription(instance, args.Description) - setTeamOwner(instance, args.Team) + instance.SetTeamOwner(args.Team) setTags(instance, args.Tags) setIP(instance, args.IP()) @@ -145,7 +145,7 @@ func (m *k8sRpaasManager) UpdateInstance(ctx context.Context, instanceName strin instance.Spec.Flavors = args.Flavors() setDescription(instance, args.Description) - setTeamOwner(instance, args.Team) + instance.SetTeamOwner(args.Team) setTags(instance, args.Tags) setIP(instance, args.IP()) @@ -1411,18 +1411,6 @@ func setTags(instance *v1alpha1.RpaasInstance, tags []string) { }) } -func setTeamOwner(instance *v1alpha1.RpaasInstance, team string) { - if instance == nil { - return - } - - newLabels := map[string]string{labelKey("team-owner"): team} - - instance.Annotations = mergeMap(instance.Annotations, newLabels) - instance.Labels = mergeMap(instance.Labels, newLabels) - instance.Spec.PodTemplate.Labels = mergeMap(instance.Spec.PodTemplate.Labels, newLabels) -} - func (m *k8sRpaasManager) GetInstanceInfo(ctx context.Context, instanceName string) (*clientTypes.InstanceInfo, error) { instance, err := m.GetInstance(ctx, instanceName) if err != nil { diff --git a/internal/pkg/rpaas/nginx/configuration_render.go b/internal/pkg/rpaas/nginx/configuration_render.go index 0909e4b94..ee07a704a 100644 --- a/internal/pkg/rpaas/nginx/configuration_render.go +++ b/internal/pkg/rpaas/nginx/configuration_render.go @@ -7,11 +7,13 @@ package nginx import ( "bytes" "fmt" + "strconv" "strings" "text/template" "github.com/tsuru/rpaas-operator/pkg/apis/extensions/v1alpha1" "github.com/tsuru/rpaas-operator/pkg/util" + "k8s.io/apimachinery/pkg/api/resource" ) type ConfigurationRenderer interface { @@ -145,6 +147,15 @@ func managePort(instance *v1alpha1.RpaasInstance) int32 { return defaultManagePort } +func k8sQuantityToNginx(quantity *resource.Quantity) string { + if quantity == nil || quantity.IsZero() { + return "0" + } + + bytesN, _ := quantity.AsInt64() + return strconv.Itoa(int(bytesN)) +} + var templateFuncs = template.FuncMap(map[string]interface{}{ "boolValue": v1alpha1.BoolValue, "buildLocationKey": buildLocationKey, @@ -159,6 +170,7 @@ var templateFuncs = template.FuncMap(map[string]interface{}{ "contains": strings.Contains, "hasPrefix": strings.HasPrefix, "hasSuffix": strings.HasSuffix, + "k8sQuantityToNginx": k8sQuantityToNginx, }) var defaultMainTemplate = template.Must(template.New("main"). @@ -217,9 +229,9 @@ http { proxy_http_version 1.1; {{- if boolValue $config.CacheEnabled }} - proxy_cache_path {{ $config.CachePath }}/nginx levels=1:2 keys_zone=rpaas:{{ $config.CacheZoneSize }} + proxy_cache_path {{ $config.CachePath }}/nginx levels=1:2 keys_zone=rpaas:{{ k8sQuantityToNginx $config.CacheZoneSize }} {{- with $config.CacheInactive }} inactive={{ . }}{{ end }} - {{- with $config.CacheSize }} max_size={{ . }}{{ end }} + {{- with $config.CacheSize }} max_size={{ k8sQuantityToNginx . }}{{ end }} {{- with $config.CacheLoaderFiles }} loader_files={{ . 
}}{{ end }}; proxy_temp_path {{ $config.CachePath }}/nginx_tmp 1 2; @@ -311,6 +323,10 @@ http { {{- end }} {{- end }} + {{- if boolValue $config.CacheEnabled }} + proxy_cache rpaas; + {{- end }} + location = /_nginx_healthcheck { {{- if boolValue $config.VTSEnabled }} vhost_traffic_status_bypass_limit on; diff --git a/internal/pkg/rpaas/nginx/configuration_render_test.go b/internal/pkg/rpaas/nginx/configuration_render_test.go index 2b32ed546..cc4bae3d3 100644 --- a/internal/pkg/rpaas/nginx/configuration_render_test.go +++ b/internal/pkg/rpaas/nginx/configuration_render_test.go @@ -12,9 +12,13 @@ import ( nginxv1alpha1 "github.com/tsuru/nginx-operator/pkg/apis/nginx/v1alpha1" "github.com/tsuru/rpaas-operator/pkg/apis/extensions/v1alpha1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" ) func TestRpaasConfigurationRenderer_Render(t *testing.T) { + size100MB := resource.MustParse("100Mi") + size300MB := resource.MustParse("300Mi") + tests := []struct { name string blocks ConfigurationBlocks @@ -70,12 +74,12 @@ func TestRpaasConfigurationRenderer_Render(t *testing.T) { Config: &v1alpha1.NginxConfig{ CacheEnabled: v1alpha1.Bool(true), CachePath: "/path/to/cache/dir", - CacheZoneSize: "100m", + CacheZoneSize: &size100MB, }, Instance: &v1alpha1.RpaasInstance{}, }, assertion: func(t *testing.T, result string) { - assert.Regexp(t, `proxy_cache_path /path/to/cache/dir/nginx levels=1:2 keys_zone=rpaas:100m;`, result) + assert.Regexp(t, `proxy_cache_path /path/to/cache/dir/nginx levels=1:2 keys_zone=rpaas:104857600;`, result) assert.Regexp(t, `proxy_temp_path /path/to/cache/dir/nginx_tmp 1 2;`, result) assert.Regexp(t, `server { \s+listen 8800; @@ -83,6 +87,8 @@ func TestRpaasConfigurationRenderer_Render(t *testing.T) { \s+proxy_cache_purge rpaas \$1\$is_args\$args; \s+} \s+}`, result) + assert.Regexp(t, `proxy_cache rpaas;`, result) + }, }, { @@ -93,13 +99,13 @@ func TestRpaasConfigurationRenderer_Render(t *testing.T) { CachePath: "/path/to/cache/dir", CacheInactive: "12h", CacheLoaderFiles: 1000, - CacheSize: "300m", - CacheZoneSize: "100m", + CacheSize: &size300MB, + CacheZoneSize: &size100MB, }, Instance: &v1alpha1.RpaasInstance{}, }, assertion: func(t *testing.T, result string) { - assert.Regexp(t, `proxy_cache_path /path/to/cache/dir/nginx levels=1:2 keys_zone=rpaas:100m inactive=12h max_size=300m loader_files=1000;`, result) + assert.Regexp(t, `proxy_cache_path /path/to/cache/dir/nginx levels=1:2 keys_zone=rpaas:104857600 inactive=12h max_size=314572800 loader_files=1000;`, result) assert.Regexp(t, `proxy_temp_path /path/to/cache/dir/nginx_tmp 1 2;`, result) assert.Regexp(t, `server { \s+listen 8800; @@ -556,3 +562,26 @@ func Test_hasRootPath(t *testing.T) { }) } } + +func TestK8sQuantityToNginx(t *testing.T) { + type expectation struct { + k8sQuantity string + nginxQuantity string + } + + expectations := []expectation{ + {"100Ki", "102400"}, + {"100Mi", "104857600"}, + {"100M", "100000000"}, + {"1Gi", "1073741824"}, + {"1G", "1000000000"}, + {"1024Gi", "1099511627776"}, + {"2Ti", "2199023255552"}, + } + + for _, expectation := range expectations { + k8sQuantity := resource.MustParse(expectation.k8sQuantity) + nginxQuantity := k8sQuantityToNginx(&k8sQuantity) + assert.Equal(t, expectation.nginxQuantity, nginxQuantity) + } +} diff --git a/pkg/apis/extensions/v1alpha1/rpaasinstance.go b/pkg/apis/extensions/v1alpha1/rpaasinstance.go new file mode 100644 index 000000000..a27ee10b9 --- /dev/null +++ b/pkg/apis/extensions/v1alpha1/rpaasinstance.go @@ -0,0 +1,30 @@ +// Copyright 
2020 tsuru authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package v1alpha1 + +const ( + teamOwnerLabel = "rpaas.extensions.tsuru.io/team-owner" +) + +func (i *RpaasInstance) SetTeamOwner(team string) { + newLabels := map[string]string{teamOwnerLabel: team} + i.Labels = mergeMap(i.Labels, newLabels) + i.Annotations = mergeMap(i.Annotations, newLabels) + i.Spec.PodTemplate.Labels = mergeMap(i.Spec.PodTemplate.Labels, newLabels) +} + +func (i *RpaasInstance) TeamOwner() string { + return i.Labels[teamOwnerLabel] +} + +func mergeMap(a, b map[string]string) map[string]string { + if a == nil { + return b + } + for k, v := range b { + a[k] = v + } + return a +} diff --git a/pkg/apis/extensions/v1alpha1/rpaasinstance_test.go b/pkg/apis/extensions/v1alpha1/rpaasinstance_test.go new file mode 100644 index 000000000..cb1bc1bda --- /dev/null +++ b/pkg/apis/extensions/v1alpha1/rpaasinstance_test.go @@ -0,0 +1,35 @@ +// Copyright 2020 tsuru authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package v1alpha1 + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_SetTeamOwner(t *testing.T) { + instance := &RpaasInstance{} + instance.SetTeamOwner("team-one") + expected := map[string]string{teamOwnerLabel: "team-one"} + assert.Equal(t, expected, instance.Labels) + assert.Equal(t, expected, instance.Annotations) + assert.Equal(t, expected, instance.Spec.PodTemplate.Labels) + + instance.SetTeamOwner("team-two") + expected = map[string]string{teamOwnerLabel: "team-two"} + assert.Equal(t, expected, instance.Labels) + assert.Equal(t, expected, instance.Annotations) + assert.Equal(t, expected, instance.Spec.PodTemplate.Labels) +} + +func Test_GetTeamOwner(t *testing.T) { + instance := &RpaasInstance{} + owner := instance.TeamOwner() + assert.Equal(t, "", owner) + instance.SetTeamOwner("team-one") + owner = instance.TeamOwner() + assert.Equal(t, "team-one", owner) +} diff --git a/pkg/apis/extensions/v1alpha1/rpaasplan_types.go b/pkg/apis/extensions/v1alpha1/rpaasplan_types.go index 6eed4ebc2..e2f305b8d 100644 --- a/pkg/apis/extensions/v1alpha1/rpaasplan_types.go +++ b/pkg/apis/extensions/v1alpha1/rpaasplan_types.go @@ -6,6 +6,7 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -59,12 +60,16 @@ type NginxConfig struct { UpstreamKeepalive int `json:"upstreamKeepalive,omitempty"` - CacheEnabled *bool `json:"cacheEnabled,omitempty"` - CacheInactive string `json:"cacheInactive,omitempty"` - CacheLoaderFiles int `json:"cacheLoaderFiles,omitempty"` - CachePath string `json:"cachePath,omitempty"` - CacheSize string `json:"cacheSize,omitempty"` - CacheZoneSize string `json:"cacheZoneSize,omitempty"` + CacheEnabled *bool `json:"cacheEnabled,omitempty"` + CacheInactive string `json:"cacheInactive,omitempty"` + CacheLoaderFiles int `json:"cacheLoaderFiles,omitempty"` + CachePath string `json:"cachePath,omitempty"` + CacheSize *resource.Quantity `json:"cacheSize,omitempty"` + CacheZoneSize *resource.Quantity `json:"cacheZoneSize,omitempty"` + + CacheSnapshotEnabled bool `json:"cacheSnapshotEnabled,omitempty"` + CacheSnapshotStorage CacheSnapshotStorage `json:"cacheSnapshotStorage,omitempty"` + CacheSnapshotSync CacheSnapshotSyncSpec `json:"cacheSnapshotSync,omitempty"` HTTPListenOptions string 
`json:"httpListenOptions,omitempty"` HTTPSListenOptions string `json:"httpsListenOptions,omitempty"` @@ -81,6 +86,27 @@ type NginxConfig struct { WorkerConnections int `json:"workerConnections,omitempty"` } +type CacheSnapshotSyncSpec struct { + // Schedule is the cron time string format, see https://en.wikipedia.org/wiki/Cron. + Schedule string `json:"schedule,omitempty"` + + // Image is the container image used to sync the cache snapshot; + // default is bitnami/kubectl:latest + Image string `json:"image,omitempty"` + + // CmdPodToPVC is used to customize the command that syncs the in-memory cache (pod) to the persistent storage (PVC) + CmdPodToPVC []string `json:"cmdPodToPVC,omitempty"` + + // CmdPVCToPod is used to customize the command that syncs the persistent storage (PVC) back to the in-memory cache (pod) + CmdPVCToPod []string `json:"cmdPVCToPod,omitempty"` +} + +type CacheSnapshotStorage struct { + StorageClassName *string `json:"storageClassName,omitempty"` + StorageSize *resource.Quantity `json:"storageSize,omitempty"` + VolumeLabels map[string]string `json:"volumeLabels,omitempty"` +} + func Bool(v bool) *bool { return &v } diff --git a/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go index c17e14f74..4a7700781 100644 --- a/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/extensions/v1alpha1/zz_generated.deepcopy.go @@ -47,6 +47,65 @@ func (in *Bind) DeepCopy() *Bind { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CacheSnapshotStorage) DeepCopyInto(out *CacheSnapshotStorage) { + *out = *in + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + *out = new(string) + **out = **in + } + if in.StorageSize != nil { + in, out := &in.StorageSize, &out.StorageSize + x := (*in).DeepCopy() + *out = &x + } + if in.VolumeLabels != nil { + in, out := &in.VolumeLabels, &out.VolumeLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheSnapshotStorage. +func (in *CacheSnapshotStorage) DeepCopy() *CacheSnapshotStorage { + if in == nil { + return nil + } + out := new(CacheSnapshotStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CacheSnapshotSyncSpec) DeepCopyInto(out *CacheSnapshotSyncSpec) { + *out = *in + if in.CmdPodToPVC != nil { + in, out := &in.CmdPodToPVC, &out.CmdPodToPVC + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.CmdPVCToPod != nil { + in, out := &in.CmdPVCToPod, &out.CmdPVCToPod + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheSnapshotSyncSpec. +func (in *CacheSnapshotSyncSpec) DeepCopy() *CacheSnapshotSyncSpec { + if in == nil { + return nil + } + out := new(CacheSnapshotSyncSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Location) DeepCopyInto(out *Location) { *out = *in @@ -92,6 +151,18 @@ func (in *NginxConfig) DeepCopyInto(out *NginxConfig) { *out = new(bool) **out = **in } + if in.CacheSize != nil { + in, out := &in.CacheSize, &out.CacheSize + x := (*in).DeepCopy() + *out = &x + } + if in.CacheZoneSize != nil { + in, out := &in.CacheZoneSize, &out.CacheZoneSize + x := (*in).DeepCopy() + *out = &x + } + in.CacheSnapshotStorage.DeepCopyInto(&out.CacheSnapshotStorage) + in.CacheSnapshotSync.DeepCopyInto(&out.CacheSnapshotSync) if in.VTSEnabled != nil { in, out := &in.VTSEnabled, &out.VTSEnabled *out = new(bool) diff --git a/pkg/controller/rpaasinstance/rpaasinstance_controller.go b/pkg/controller/rpaasinstance/rpaasinstance_controller.go index 5d540391f..2ab00c9b5 100644 --- a/pkg/controller/rpaasinstance/rpaasinstance_controller.go +++ b/pkg/controller/rpaasinstance/rpaasinstance_controller.go @@ -11,6 +11,7 @@ import ( "fmt" "reflect" "sort" + "strings" "text/template" "github.com/imdario/mergo" @@ -23,10 +24,11 @@ import ( "github.com/tsuru/rpaas-operator/pkg/util" "github.com/willf/bitset" autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" k8sErrors "k8s.io/apimachinery/pkg/api/errors" - k8sResources "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -41,9 +43,45 @@ import ( ) const ( - defaultConfigHistoryLimit = 10 - + defaultConfigHistoryLimit = 10 + defaultCacheSnapshotCronImage = "bitnami/kubectl:latest" + defaultCacheSnapshotSchedule = "* * * * *" defaultPortAllocationResource = "default" + volumeTeamLabel = "tsuru.io/volume-team" + + cacheSnapshotCronJobSuffix = "-snapshot-cron-job" + cacheSnapshotVolumeSuffix = "-snapshot-volume" + + cacheSnapshotMountPoint = "/var/cache/cache-snapshot" + + rsyncCommandPodToPVC = "rsync -avz --recursive --delete --temp-dir=${CACHE_SNAPSHOT_MOUNTPOINT}/temp ${CACHE_PATH}/nginx ${CACHE_SNAPSHOT_MOUNTPOINT}" + rsyncCommandPVCToPod = "rsync -avz --recursive --delete --temp-dir=${CACHE_PATH}/nginx_tmp ${CACHE_SNAPSHOT_MOUNTPOINT}/nginx ${CACHE_PATH}" +) + +var ( + defaultCacheSnapshotCmdPodToPVC = []string{ + "/bin/bash", + "-c", + `pods=($(kubectl -n ${SERVICE_NAME} get pod -l rpaas.extensions.tsuru.io/service-name=${SERVICE_NAME} -l rpaas.extensions.tsuru.io/instance-name=${INSTANCE_NAME} --field-selector status.phase=Running -o=jsonpath='{.items[*].metadata.name}')); +for pod in ${pods[@]}; do + kubectl -n ${SERVICE_NAME} exec ${pod} -- ${POD_CMD}; + if [[ $? 
== 0 ]]; then + exit 0; + fi +done +echo "No pods found"; +exit 1 +`} + + defaultCacheSnapshotCmdPVCToPod = []string{ + "/bin/bash", + "-c", + ` +mkdir -p ${CACHE_SNAPSHOT_MOUNTPOINT}/temp; +mkdir -p ${CACHE_SNAPSHOT_MOUNTPOINT}/nginx; +mkdir -p ${CACHE_PATH}/nginx_tmp; +${POD_CMD} +`} ) var log = logf.Log.WithName("controller_rpaasinstance") @@ -81,10 +119,26 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { return err } + err = c.Watch(&source.Kind{Type: &corev1.PersistentVolumeClaim{}}, &handler.EnqueueRequestForOwner{ + IsController: true, + OwnerType: &extensionsv1alpha1.RpaasInstance{}, + }) + if err != nil { + return err + } + err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForOwner{ IsController: true, OwnerType: &extensionsv1alpha1.RpaasInstance{}, }) + if err != nil { + return err + } + + err = c.Watch(&source.Kind{Type: &batchv1beta1.CronJob{}}, &handler.EnqueueRequestForOwner{ + IsController: true, + OwnerType: &extensionsv1alpha1.RpaasInstance{}, + }) return err } @@ -159,32 +213,36 @@ func (r *ReconcileRpaasInstance) Reconcile(request reconcile.Request) (reconcile } } - rendered, err := r.renderTemplate(instance, plan) + rendered, err := r.renderTemplate(ctx, instance, plan) if err != nil { return reconcile.Result{}, err } configMap := newConfigMap(instance, rendered) - err = r.reconcileConfigMap(configMap) + err = r.reconcileConfigMap(ctx, configMap) if err != nil { return reconcile.Result{}, err } - configList, err := r.listConfigs(instance) + configList, err := r.listConfigs(ctx, instance) if err != nil { return reconcile.Result{}, err } if shouldDeleteOldConfig(instance, configList) { - if err = r.deleteOldConfig(instance, configList); err != nil { + if err = r.deleteOldConfig(ctx, instance, configList); err != nil { return reconcile.Result{}, err } } nginx := newNginx(instance, plan, configMap) - if err = r.reconcileNginx(nginx); err != nil { + if err = r.reconcileNginx(ctx, nginx); err != nil { + return reconcile.Result{}, err + } + + if err = r.reconcileCacheSnapshot(ctx, instance, plan); err != nil { return reconcile.Result{}, err } - if err = r.reconcileHPA(ctx, *instance, *nginx); err != nil { + if err = r.reconcileHPA(ctx, instance, nginx); err != nil { return reconcile.Result{}, err } @@ -216,7 +274,7 @@ func (r *ReconcileRpaasInstance) mergeInstanceWithFlavors(ctx context.Context, i logger := log.WithName("mergeInstanceWithFlavors"). 
WithValues("RpaasInstance", types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}) - defaultFlavors, err := r.listDefaultFlavors(instance) + defaultFlavors, err := r.listDefaultFlavors(ctx, instance) if err != nil { return nil, err } @@ -281,9 +339,9 @@ func mergeInstanceWithFlavor(instance *v1alpha1.RpaasInstance, flavor v1alpha1.R return nil } -func (r *ReconcileRpaasInstance) listDefaultFlavors(instance *v1alpha1.RpaasInstance) ([]v1alpha1.RpaasFlavor, error) { +func (r *ReconcileRpaasInstance) listDefaultFlavors(ctx context.Context, instance *v1alpha1.RpaasInstance) ([]v1alpha1.RpaasFlavor, error) { flavorList := &v1alpha1.RpaasFlavorList{} - if err := r.client.List(context.TODO(), flavorList, client.InNamespace(instance.Namespace)); err != nil { + if err := r.client.List(ctx, flavorList, client.InNamespace(instance.Namespace)); err != nil { return nil, err } var result []v1alpha1.RpaasFlavor @@ -296,7 +354,7 @@ func (r *ReconcileRpaasInstance) listDefaultFlavors(instance *v1alpha1.RpaasInst return result, nil } -func (r *ReconcileRpaasInstance) reconcileHPA(ctx context.Context, instance v1alpha1.RpaasInstance, nginx nginxv1alpha1.Nginx) error { +func (r *ReconcileRpaasInstance) reconcileHPA(ctx context.Context, instance *v1alpha1.RpaasInstance, nginx *nginxv1alpha1.Nginx) error { logger := log.WithName("reconcileHPA"). WithValues("RpaasInstance", types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}). WithValues("Nginx", types.NamespacedName{Name: nginx.Name, Namespace: nginx.Namespace}) @@ -356,15 +414,15 @@ func (r *ReconcileRpaasInstance) reconcileHPA(ctx context.Context, instance v1al return nil } -func (r *ReconcileRpaasInstance) reconcileConfigMap(configMap *corev1.ConfigMap) error { +func (r *ReconcileRpaasInstance) reconcileConfigMap(ctx context.Context, configMap *corev1.ConfigMap) error { found := &corev1.ConfigMap{} - err := r.client.Get(context.TODO(), types.NamespacedName{Name: configMap.ObjectMeta.Name, Namespace: configMap.ObjectMeta.Namespace}, found) + err := r.client.Get(ctx, types.NamespacedName{Name: configMap.ObjectMeta.Name, Namespace: configMap.ObjectMeta.Namespace}, found) if err != nil { if !k8sErrors.IsNotFound(err) { logrus.Errorf("Failed to get configMap: %v", err) return err } - err = r.client.Create(context.TODO(), configMap) + err = r.client.Create(ctx, configMap) if err != nil { logrus.Errorf("Failed to create configMap: %v", err) return err @@ -373,22 +431,22 @@ func (r *ReconcileRpaasInstance) reconcileConfigMap(configMap *corev1.ConfigMap) } configMap.ObjectMeta.ResourceVersion = found.ObjectMeta.ResourceVersion - err = r.client.Update(context.TODO(), configMap) + err = r.client.Update(ctx, configMap) if err != nil { logrus.Errorf("Failed to update configMap: %v", err) } return err } -func (r *ReconcileRpaasInstance) reconcileNginx(nginx *nginxv1alpha1.Nginx) error { +func (r *ReconcileRpaasInstance) reconcileNginx(ctx context.Context, nginx *nginxv1alpha1.Nginx) error { found := &nginxv1alpha1.Nginx{} - err := r.client.Get(context.TODO(), types.NamespacedName{Name: nginx.ObjectMeta.Name, Namespace: nginx.ObjectMeta.Namespace}, found) + err := r.client.Get(ctx, types.NamespacedName{Name: nginx.ObjectMeta.Name, Namespace: nginx.ObjectMeta.Namespace}, found) if err != nil { if !k8sErrors.IsNotFound(err) { logrus.Errorf("Failed to get nginx CR: %v", err) return err } - err = r.client.Create(context.TODO(), nginx) + err = r.client.Create(ctx, nginx) if err != nil { logrus.Errorf("Failed to create nginx CR: %v", err) 
return err @@ -397,20 +455,153 @@ func (r *ReconcileRpaasInstance) reconcileNginx(nginx *nginxv1alpha1.Nginx) erro } nginx.ObjectMeta.ResourceVersion = found.ObjectMeta.ResourceVersion - err = r.client.Update(context.TODO(), nginx) + err = r.client.Update(ctx, nginx) if err != nil { logrus.Errorf("Failed to update nginx CR: %v", err) } return err } -func (r *ReconcileRpaasInstance) renderTemplate(instance *v1alpha1.RpaasInstance, plan *v1alpha1.RpaasPlan) (string, error) { - blocks, err := r.getConfigurationBlocks(instance, plan) +func (r *ReconcileRpaasInstance) reconcileCacheSnapshot(ctx context.Context, instance *v1alpha1.RpaasInstance, plan *v1alpha1.RpaasPlan) error { + if plan.Spec.Config.CacheSnapshotEnabled { + err := r.reconcileCacheSnapshotCronJob(ctx, instance, plan) + if err != nil { + return err + } + return r.reconcileCacheSnapshotVolume(ctx, instance, plan) + } + + err := r.destroyCacheSnapshotCronJob(ctx, instance) + if err != nil { + return err + } + return r.destroyCacheSnapshotVolume(ctx, instance) +} + +func (r *ReconcileRpaasInstance) reconcileCacheSnapshotCronJob(ctx context.Context, instance *v1alpha1.RpaasInstance, plan *v1alpha1.RpaasPlan) error { + foundCronJob := &batchv1beta1.CronJob{} + cronName := instance.Name + cacheSnapshotCronJobSuffix + err := r.client.Get(ctx, types.NamespacedName{Name: cronName, Namespace: instance.Namespace}, foundCronJob) + if err != nil && !k8sErrors.IsNotFound(err) { + return err + } + + newestCronJob := newCronJob(instance, plan) + if k8sErrors.IsNotFound(err) { + return r.client.Create(ctx, newestCronJob) + } + + newestCronJob.ObjectMeta.ResourceVersion = foundCronJob.ObjectMeta.ResourceVersion + if !reflect.DeepEqual(foundCronJob.Spec, newestCronJob.Spec) { + return r.client.Update(ctx, newestCronJob) + } + + return nil +} + +func (r *ReconcileRpaasInstance) destroyCacheSnapshotCronJob(ctx context.Context, instance *v1alpha1.RpaasInstance) error { + cronName := instance.Name + cacheSnapshotCronJobSuffix + cronJob := &batchv1beta1.CronJob{} + + err := r.client.Get(ctx, types.NamespacedName{Name: cronName, Namespace: instance.Namespace}, cronJob) + isNotFound := k8sErrors.IsNotFound(err) + if err != nil && !isNotFound { + return err + } else if isNotFound { + return nil + } + + logrus.Infof("deleting cronjob %s", cronName) + return r.client.Delete(ctx, cronJob) +} +func (r *ReconcileRpaasInstance) reconcileCacheSnapshotVolume(ctx context.Context, instance *v1alpha1.RpaasInstance, plan *v1alpha1.RpaasPlan) error { + pvcName := instance.Name + cacheSnapshotVolumeSuffix + + pvc := &corev1.PersistentVolumeClaim{} + err := r.client.Get(ctx, types.NamespacedName{Name: pvcName, Namespace: instance.Namespace}, pvc) + isNotFound := k8sErrors.IsNotFound(err) + if err != nil && !isNotFound { + return err + } else if !isNotFound { + return nil + } + + cacheSnapshotStorage := plan.Spec.Config.CacheSnapshotStorage + volumeMode := corev1.PersistentVolumeFilesystem + labels := labelsForRpaasInstance(instance) + if teamOwner := instance.TeamOwner(); teamOwner != "" { + labels[volumeTeamLabel] = teamOwner + } + for k, v := range cacheSnapshotStorage.VolumeLabels { + labels[k] = v + } + + pvc = &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvcName, + Namespace: instance.Namespace, + Labels: labels, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(instance, schema.GroupVersionKind{ + Group: v1alpha1.SchemeGroupVersion.Group, + Version: v1alpha1.SchemeGroupVersion.Version, + Kind: "RpaasInstance", + }), + 
}, + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "PersistentVolumeClaim", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteMany, + }, + VolumeMode: &volumeMode, + StorageClassName: cacheSnapshotStorage.StorageClassName, + }, + } + + storageSize := plan.Spec.Config.CacheSize + if cacheSnapshotStorage.StorageSize != nil && !cacheSnapshotStorage.StorageSize.IsZero() { + storageSize = cacheSnapshotStorage.StorageSize + } + + if storageSize != nil && !storageSize.IsZero() { + pvc.Spec.Resources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + "storage": *storageSize, + }, + } + } + + logrus.Infof("creating PersistentVolumeClaim %s", pvcName) + return r.client.Create(ctx, pvc) +} + +func (r *ReconcileRpaasInstance) destroyCacheSnapshotVolume(ctx context.Context, instance *v1alpha1.RpaasInstance) error { + pvcName := instance.Name + cacheSnapshotVolumeSuffix + + pvc := &corev1.PersistentVolumeClaim{} + err := r.client.Get(ctx, types.NamespacedName{Name: pvcName, Namespace: instance.Namespace}, pvc) + isNotFound := k8sErrors.IsNotFound(err) + if err != nil && !isNotFound { + return err + } else if isNotFound { + return nil + } + + logrus.Infof("deleting PersistentVolumeClaim %s", pvcName) + return r.client.Delete(ctx, pvc) +} + +func (r *ReconcileRpaasInstance) renderTemplate(ctx context.Context, instance *v1alpha1.RpaasInstance, plan *v1alpha1.RpaasPlan) (string, error) { + blocks, err := r.getConfigurationBlocks(ctx, instance, plan) if err != nil { return "", err } - if err = r.updateLocationValues(instance); err != nil { + if err = r.updateLocationValues(ctx, instance); err != nil { return "", err } @@ -425,11 +616,11 @@ func (r *ReconcileRpaasInstance) renderTemplate(instance *v1alpha1.RpaasInstance }) } -func (r *ReconcileRpaasInstance) getConfigurationBlocks(instance *v1alpha1.RpaasInstance, plan *v1alpha1.RpaasPlan) (nginx.ConfigurationBlocks, error) { +func (r *ReconcileRpaasInstance) getConfigurationBlocks(ctx context.Context, instance *v1alpha1.RpaasInstance, plan *v1alpha1.RpaasPlan) (nginx.ConfigurationBlocks, error) { var blocks nginx.ConfigurationBlocks if plan.Spec.Template != nil { - mainBlock, err := util.GetValue(context.TODO(), r.client, "", plan.Spec.Template) + mainBlock, err := util.GetValue(ctx, r.client, "", plan.Spec.Template) if err != nil { return blocks, err } @@ -438,7 +629,7 @@ func (r *ReconcileRpaasInstance) getConfigurationBlocks(instance *v1alpha1.Rpaas } for blockType, blockValue := range instance.Spec.Blocks { - content, err := util.GetValue(context.TODO(), r.client, instance.Namespace, &blockValue) + content, err := util.GetValue(ctx, r.client, instance.Namespace, &blockValue) if err != nil { return blocks, err } @@ -460,13 +651,13 @@ func (r *ReconcileRpaasInstance) getConfigurationBlocks(instance *v1alpha1.Rpaas return blocks, nil } -func (r *ReconcileRpaasInstance) updateLocationValues(instance *v1alpha1.RpaasInstance) error { +func (r *ReconcileRpaasInstance) updateLocationValues(ctx context.Context, instance *v1alpha1.RpaasInstance) error { for _, location := range instance.Spec.Locations { if location.Content == nil { continue } - content, err := util.GetValue(context.TODO(), r.client, instance.Namespace, location.Content) + content, err := util.GetValue(ctx, r.client, instance.Namespace, location.Content) if err != nil { return err } @@ -476,7 +667,7 @@ func (r *ReconcileRpaasInstance) updateLocationValues(instance *v1alpha1.RpaasIn return nil } -func (r 
*ReconcileRpaasInstance) listConfigs(instance *v1alpha1.RpaasInstance) (*corev1.ConfigMapList, error) { +func (r *ReconcileRpaasInstance) listConfigs(ctx context.Context, instance *v1alpha1.RpaasInstance) (*corev1.ConfigMapList, error) { configList := &corev1.ConfigMapList{} listOptions := &client.ListOptions{Namespace: instance.ObjectMeta.Namespace} client.MatchingLabels(map[string]string{ @@ -484,16 +675,16 @@ func (r *ReconcileRpaasInstance) listConfigs(instance *v1alpha1.RpaasInstance) ( "type": "config", }).ApplyToList(listOptions) - err := r.client.List(context.TODO(), configList, listOptions) + err := r.client.List(ctx, configList, listOptions) return configList, err } -func (r *ReconcileRpaasInstance) deleteOldConfig(instance *v1alpha1.RpaasInstance, configList *corev1.ConfigMapList) error { +func (r *ReconcileRpaasInstance) deleteOldConfig(ctx context.Context, instance *v1alpha1.RpaasInstance, configList *corev1.ConfigMapList) error { list := configList.Items sort.Slice(list, func(i, j int) bool { return list[i].ObjectMeta.CreationTimestamp.String() < list[j].ObjectMeta.CreationTimestamp.String() }) - if err := r.client.Delete(context.TODO(), &list[0]); err != nil { + if err := r.client.Delete(ctx, &list[0]); err != nil { return err } return nil @@ -501,14 +692,15 @@ func (r *ReconcileRpaasInstance) deleteOldConfig(instance *v1alpha1.RpaasInstanc func newConfigMap(instance *v1alpha1.RpaasInstance, renderedTemplate string) *corev1.ConfigMap { hash := fmt.Sprintf("%x", sha256.Sum256([]byte(renderedTemplate))) + labels := labelsForRpaasInstance(instance) + labels["type"] = "config" + labels["instance"] = instance.Name + return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-config-%s", instance.Name, hash[:10]), Namespace: instance.Namespace, - Labels: map[string]string{ - "type": "config", - "instance": instance.Name, - }, + Labels: labels, OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(instance, schema.GroupVersionKind{ Group: v1alpha1.SchemeGroupVersion.Group, @@ -532,12 +724,11 @@ func newNginx(instance *v1alpha1.RpaasInstance, plan *v1alpha1.RpaasPlan, config if v1alpha1.BoolValue(plan.Spec.Config.CacheEnabled) { cacheConfig.Path = plan.Spec.Config.CachePath cacheConfig.InMemory = true - cacheMaxSize, err := k8sResources.ParseQuantity(plan.Spec.Config.CacheSize) - if err == nil && !cacheMaxSize.IsZero() { - cacheConfig.Size = &cacheMaxSize + if plan.Spec.Config.CacheSize != nil && !plan.Spec.Config.CacheSize.IsZero() { + cacheConfig.Size = plan.Spec.Config.CacheSize } } - return &nginxv1alpha1.Nginx{ + n := &nginxv1alpha1.Nginx{ ObjectMeta: metav1.ObjectMeta{ Name: instance.Name, Namespace: instance.Namespace, @@ -548,6 +739,7 @@ func newNginx(instance *v1alpha1.RpaasInstance, plan *v1alpha1.RpaasPlan, config Kind: "RpaasInstance", }), }, + Labels: labelsForRpaasInstance(instance), }, TypeMeta: metav1.TypeMeta{ Kind: "Nginx", @@ -570,9 +762,56 @@ func newNginx(instance *v1alpha1.RpaasInstance, plan *v1alpha1.RpaasPlan, config Lifecycle: instance.Spec.Lifecycle, }, } + + if !plan.Spec.Config.CacheSnapshotEnabled { + return n + } + + initCmd := defaultCacheSnapshotCmdPVCToPod + if len(plan.Spec.Config.CacheSnapshotSync.CmdPVCToPod) > 0 { + initCmd = plan.Spec.Config.CacheSnapshotSync.CmdPVCToPod + } + + n.Spec.PodTemplate.Volumes = append(n.Spec.PodTemplate.Volumes, corev1.Volume{ + Name: "cache-snapshot-volume", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: 
instance.Name + cacheSnapshotVolumeSuffix, + }, + }, + }) + + cacheSnapshotVolume := corev1.VolumeMount{ + Name: "cache-snapshot-volume", + MountPath: cacheSnapshotMountPoint, + } + + n.Spec.PodTemplate.VolumeMounts = append(n.Spec.PodTemplate.VolumeMounts, cacheSnapshotVolume) + + n.Spec.PodTemplate.InitContainers = append(n.Spec.PodTemplate.InitContainers, corev1.Container{ + Name: "restore-snapshot", + Image: plan.Spec.Image, + Command: []string{ + initCmd[0], + }, + Args: initCmd[1:], + VolumeMounts: []corev1.VolumeMount{ + cacheSnapshotVolume, + { + Name: "cache-vol", + MountPath: plan.Spec.Config.CachePath, + }, + }, + Env: append(cacheSnapshotEnvVars(instance, plan), corev1.EnvVar{ + Name: "POD_CMD", + Value: interpolateCacheSnapshotPodCmdTemplate(rsyncCommandPVCToPod, plan), + }), + }) + + return n } -func newHPA(instance v1alpha1.RpaasInstance, nginx nginxv1alpha1.Nginx) autoscalingv2beta2.HorizontalPodAutoscaler { +func newHPA(instance *v1alpha1.RpaasInstance, nginx *nginxv1alpha1.Nginx) autoscalingv2beta2.HorizontalPodAutoscaler { var metrics []autoscalingv2beta2.MetricSpec if instance.Spec.Autoscale.TargetCPUUtilizationPercentage != nil { @@ -615,12 +854,13 @@ func newHPA(instance v1alpha1.RpaasInstance, nginx nginxv1alpha1.Nginx) autoscal Name: instance.Name, Namespace: instance.Namespace, OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(&instance, schema.GroupVersionKind{ + *metav1.NewControllerRef(instance, schema.GroupVersionKind{ Group: v1alpha1.SchemeGroupVersion.Group, Version: v1alpha1.SchemeGroupVersion.Version, Kind: "RpaasInstance", }), }, + Labels: labelsForRpaasInstance(instance), }, Spec: autoscalingv2beta2.HorizontalPodAutoscalerSpec{ ScaleTargetRef: autoscalingv2beta2.CrossVersionObjectReference{ @@ -635,6 +875,95 @@ func newHPA(instance v1alpha1.RpaasInstance, nginx nginxv1alpha1.Nginx) autoscal } } +func newCronJob(instance *v1alpha1.RpaasInstance, plan *v1alpha1.RpaasPlan) *batchv1beta1.CronJob { + cronName := instance.Name + cacheSnapshotCronJobSuffix + + schedule := defaultCacheSnapshotSchedule + if plan.Spec.Config.CacheSnapshotSync.Schedule != "" { + schedule = plan.Spec.Config.CacheSnapshotSync.Schedule + } + + image := defaultCacheSnapshotCronImage + if plan.Spec.Config.CacheSnapshotSync.Image != "" { + image = plan.Spec.Config.CacheSnapshotSync.Image + } + + cmds := defaultCacheSnapshotCmdPodToPVC + if len(plan.Spec.Config.CacheSnapshotSync.CmdPodToPVC) > 0 { + cmds = plan.Spec.Config.CacheSnapshotSync.CmdPodToPVC + } + jobLabels := labelsForRpaasInstance(instance) + jobLabels["log-app-name"] = instance.Name + jobLabels["log-process-name"] = "cache-synchronize" + + return &batchv1beta1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: cronName, + Namespace: instance.Namespace, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(instance, schema.GroupVersionKind{ + Group: v1alpha1.SchemeGroupVersion.Group, + Version: v1alpha1.SchemeGroupVersion.Version, + Kind: "RpaasInstance", + }), + }, + Labels: labelsForRpaasInstance(instance), + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "batch/v1beta1", + Kind: "CronJob", + }, + Spec: batchv1beta1.CronJobSpec{ + Schedule: schedule, + ConcurrencyPolicy: batchv1beta1.ForbidConcurrent, + JobTemplate: batchv1beta1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: jobLabels, + }, + Spec: corev1.PodSpec{ + ServiceAccountName: "rpaas-cache-snapshot-cronjob", + Containers: []corev1.Container{ + { + Name: 
"cache-synchronize", + Image: image, + Command: []string{ + cmds[0], + }, + Args: cmds[1:], + Env: append(cacheSnapshotEnvVars(instance, plan), corev1.EnvVar{ + Name: "POD_CMD", + Value: interpolateCacheSnapshotPodCmdTemplate(rsyncCommandPodToPVC, plan), + }), + }, + }, + RestartPolicy: corev1.RestartPolicyNever, + }, + }, + }, + }, + }, + } +} + +func interpolateCacheSnapshotPodCmdTemplate(podCmd string, plan *v1alpha1.RpaasPlan) string { + replacer := strings.NewReplacer( + "${CACHE_SNAPSHOT_MOUNTPOINT}", cacheSnapshotMountPoint, + "${CACHE_PATH}", plan.Spec.Config.CachePath, + ) + return replacer.Replace(podCmd) +} + +func cacheSnapshotEnvVars(instance *v1alpha1.RpaasInstance, plan *v1alpha1.RpaasPlan) []corev1.EnvVar { + return []corev1.EnvVar{ + {Name: "SERVICE_NAME", Value: instance.Namespace}, + {Name: "INSTANCE_NAME", Value: instance.Name}, + {Name: "CACHE_SNAPSHOT_MOUNTPOINT", Value: cacheSnapshotMountPoint}, + {Name: "CACHE_PATH", Value: plan.Spec.Config.CachePath}, + } +} + func shouldDeleteOldConfig(instance *v1alpha1.RpaasInstance, configList *corev1.ConfigMapList) bool { limit := defaultConfigHistoryLimit @@ -841,3 +1170,10 @@ func (r *ReconcileRpaasInstance) reconcilePorts(ctx context.Context, instance *e return instancePorts, nil } + +func labelsForRpaasInstance(instance *extensionsv1alpha1.RpaasInstance) map[string]string { + return map[string]string{ + "rpaas.extensions.tsuru.io/instance-name": instance.Name, + "rpaas.extensions.tsuru.io/plan-name": instance.Spec.PlanName, + } +} diff --git a/pkg/controller/rpaasinstance/rpaasinstance_controller_test.go b/pkg/controller/rpaasinstance/rpaasinstance_controller_test.go index fcfd0071a..8c1bf6107 100644 --- a/pkg/controller/rpaasinstance/rpaasinstance_controller_test.go +++ b/pkg/controller/rpaasinstance/rpaasinstance_controller_test.go @@ -15,6 +15,8 @@ import ( "github.com/tsuru/rpaas-operator/pkg/apis" "github.com/tsuru/rpaas-operator/pkg/apis/extensions/v1alpha1" autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" corev1 "k8s.io/api/core/v1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" @@ -22,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" ) func Test_mergePlans(t *testing.T) { @@ -58,7 +61,7 @@ func Test_mergePlans(t *testing.T) { Description: "a", Config: v1alpha1.NginxConfig{ User: "root", - CacheSize: "10", + CacheSize: resourceMustParsePtr("10M"), CacheEnabled: v1alpha1.Bool(true), }, }, @@ -73,7 +76,7 @@ func Test_mergePlans(t *testing.T) { Description: "a", Config: v1alpha1.NginxConfig{ User: "ubuntu", - CacheSize: "10", + CacheSize: resourceMustParsePtr("10M"), CacheEnabled: v1alpha1.Bool(true), }, }, @@ -84,7 +87,7 @@ func Test_mergePlans(t *testing.T) { Description: "a", Config: v1alpha1.NginxConfig{ User: "root", - CacheSize: "10", + CacheSize: resourceMustParsePtr("10M"), CacheEnabled: v1alpha1.Bool(true), }, }, @@ -100,7 +103,7 @@ func Test_mergePlans(t *testing.T) { Description: "a", Config: v1alpha1.NginxConfig{ User: "ubuntu", - CacheSize: "10", + CacheSize: resourceMustParsePtr("10M"), CacheEnabled: v1alpha1.Bool(false), }, }, @@ -286,6 +289,7 @@ func TestReconcileRpaasInstance_getRpaasInstance(t *testing.T) { Namespace: instance1.Namespace, }, Spec: v1alpha1.RpaasInstanceSpec{ + PlanName: "my-plan", Service: &nginxv1alpha1.NginxService{ Annotations: 
map[string]string{ "default-service-annotation": "default", @@ -320,7 +324,8 @@ func TestReconcileRpaasInstance_getRpaasInstance(t *testing.T) { Namespace: instance2.Namespace, }, Spec: v1alpha1.RpaasInstanceSpec{ - Flavors: []string{"mint"}, + Flavors: []string{"mint"}, + PlanName: "my-plan", Lifecycle: &nginxv1alpha1.NginxLifecycle{ PostStart: &nginxv1alpha1.NginxLifecycleHandler{ Exec: &corev1.ExecAction{ @@ -375,7 +380,8 @@ func TestReconcileRpaasInstance_getRpaasInstance(t *testing.T) { Namespace: instance3.Namespace, }, Spec: v1alpha1.RpaasInstanceSpec{ - Flavors: []string{"mint", "mango"}, + Flavors: []string{"mint", "mango"}, + PlanName: "my-plan", Service: &nginxv1alpha1.NginxService{ Annotations: map[string]string{ "default-service-annotation": "default", @@ -427,6 +433,7 @@ func TestReconcileRpaasInstance_getRpaasInstance(t *testing.T) { }, }, Spec: v1alpha1.RpaasInstanceSpec{ + PlanName: "my-plan", Service: &nginxv1alpha1.NginxService{ Annotations: map[string]string{ "default-service-annotation": "default", @@ -528,14 +535,14 @@ func Test_reconcileHPA(t *testing.T) { tests := []struct { name string - instance v1alpha1.RpaasInstance - nginx nginxv1alpha1.Nginx + instance *v1alpha1.RpaasInstance + nginx *nginxv1alpha1.Nginx assertion func(t *testing.T, err error, got *autoscalingv2beta2.HorizontalPodAutoscaler) }{ { name: "when there is HPA resource but autoscale spec is nil", - instance: *instance2, - nginx: *nginx2, + instance: instance2, + nginx: nginx2, assertion: func(t *testing.T, err error, got *autoscalingv2beta2.HorizontalPodAutoscaler) { require.Error(t, err) assert.True(t, k8sErrors.IsNotFound(err)) @@ -543,8 +550,8 @@ func Test_reconcileHPA(t *testing.T) { }, { name: "when there is no HPA resource but autoscale spec is provided", - instance: *instance1, - nginx: *nginx1, + instance: instance1, + nginx: nginx1, assertion: func(t *testing.T, err error, got *autoscalingv2beta2.HorizontalPodAutoscaler) { require.NoError(t, err) require.NotNil(t, got) @@ -576,11 +583,16 @@ func Test_reconcileHPA(t *testing.T) { }, }, }, got.Spec.Metrics[1]) + + assert.Equal(t, map[string]string{ + "rpaas.extensions.tsuru.io/instance-name": "instance-1", + "rpaas.extensions.tsuru.io/plan-name": "my-plan", + }, got.ObjectMeta.Labels) }, }, { name: "when there is HPA resource but differs from autoscale spec", - instance: v1alpha1.RpaasInstance{ + instance: &v1alpha1.RpaasInstance{ TypeMeta: metav1.TypeMeta{ APIVersion: "extensions.tsuru.io/v1alpha1", Kind: "RpaasInstance", @@ -598,7 +610,7 @@ func Test_reconcileHPA(t *testing.T) { }, }, }, - nginx: *nginx2, + nginx: nginx2, assertion: func(t *testing.T, err error, got *autoscalingv2beta2.HorizontalPodAutoscaler) { require.NoError(t, err) require.NotNil(t, got) @@ -655,10 +667,279 @@ func Test_reconcileHPA(t *testing.T) { } } +func Test_reconcileSnapshotVolume(t *testing.T) { + ctx := context.TODO() + rpaasInstance := newEmptyRpaasInstance() + rpaasInstance.Name = "my-instance" + rpaasInstance.SetTeamOwner("team-one") + + tests := []struct { + name string + planSpec v1alpha1.RpaasPlanSpec + assert func(*testing.T, *corev1.PersistentVolumeClaim) + }{ + { + name: "Should repass attributes to PVC", + planSpec: v1alpha1.RpaasPlanSpec{ + Config: v1alpha1.NginxConfig{ + CacheSize: resourceMustParsePtr("10Gi"), + CacheSnapshotStorage: v1alpha1.CacheSnapshotStorage{ + StorageClassName: strPtr("my-storage-class"), + }, + }, + }, + assert: func(t *testing.T, pvc *corev1.PersistentVolumeClaim) { + assert.Equal(t, pvc.ObjectMeta.OwnerReferences[0].Kind, 
"RpaasInstance") + assert.Equal(t, pvc.ObjectMeta.OwnerReferences[0].Name, rpaasInstance.Name) + assert.Equal(t, pvc.Spec.StorageClassName, strPtr("my-storage-class")) + assert.Equal(t, pvc.Spec.AccessModes, []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany}) + + parsedSize, _ := resource.ParseQuantity("10Gi") + assert.Equal(t, parsedSize, pvc.Spec.Resources.Requests["storage"]) + }, + }, + { + name: "Should repass volume labels to PVC", + planSpec: v1alpha1.RpaasPlanSpec{ + Config: v1alpha1.NginxConfig{ + CacheSnapshotStorage: v1alpha1.CacheSnapshotStorage{ + StorageClassName: strPtr("my-storage-class"), + VolumeLabels: map[string]string{ + "some-label": "foo", + "other-label": "bar", + }, + }, + }, + }, + assert: func(t *testing.T, pvc *corev1.PersistentVolumeClaim) { + assert.Equal(t, 5, len(pvc.ObjectMeta.Labels)) + assert.Equal(t, map[string]string{ + "some-label": "foo", + "other-label": "bar", + "tsuru.io/volume-team": "team-one", + "rpaas.extensions.tsuru.io/instance-name": "my-instance", + "rpaas.extensions.tsuru.io/plan-name": "my-plan", + }, pvc.ObjectMeta.Labels) + }, + }, + + { + name: "Should priorize the team inside plan", + planSpec: v1alpha1.RpaasPlanSpec{ + Config: v1alpha1.NginxConfig{ + CacheSnapshotStorage: v1alpha1.CacheSnapshotStorage{ + VolumeLabels: map[string]string{ + "tsuru.io/volume-team": "another-team", + }, + }, + }, + }, + assert: func(t *testing.T, pvc *corev1.PersistentVolumeClaim) { + assert.Equal(t, "another-team", pvc.ObjectMeta.Labels["tsuru.io/volume-team"]) + }, + }, + { + name: "Should allow to customize size of PVC separately of cache settings", + planSpec: v1alpha1.RpaasPlanSpec{ + Config: v1alpha1.NginxConfig{ + CacheSize: resourceMustParsePtr("10Gi"), + CacheSnapshotStorage: v1alpha1.CacheSnapshotStorage{ + StorageSize: resourceMustParsePtr("100Gi"), + }, + }, + }, + assert: func(t *testing.T, pvc *corev1.PersistentVolumeClaim) { + parsedSize, _ := resource.ParseQuantity("100Gi") + assert.Equal(t, parsedSize, pvc.Spec.Resources.Requests["storage"]) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resources := []runtime.Object{} + scheme := newScheme() + corev1.AddToScheme(scheme) + + k8sClient := fake.NewFakeClientWithScheme(scheme, resources...) + reconciler := &ReconcileRpaasInstance{ + client: k8sClient, + scheme: newScheme(), + } + err := reconciler.reconcileCacheSnapshotVolume(ctx, rpaasInstance, &v1alpha1.RpaasPlan{Spec: tt.planSpec}) + require.NoError(t, err) + + pvc := &corev1.PersistentVolumeClaim{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: rpaasInstance.Name + "-snapshot-volume", + Namespace: rpaasInstance.Namespace, + }, pvc) + require.NoError(t, err) + + tt.assert(t, pvc) + }) + } + +} + +func Test_destroySnapshotVolume(t *testing.T) { + ctx := context.TODO() + instance1 := newEmptyRpaasInstance() + instance1.Name = "instance-1" + + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "instance-1-snapshot-volume", + Namespace: "default", + }, + } + resources := []runtime.Object{pvc} + scheme := newScheme() + corev1.AddToScheme(scheme) + + k8sClient := fake.NewFakeClientWithScheme(scheme, resources...) 
+ reconciler := &ReconcileRpaasInstance{ + client: k8sClient, + scheme: newScheme(), + } + + err := reconciler.destroyCacheSnapshotVolume(ctx, instance1) + require.NoError(t, err) + + pvc = &corev1.PersistentVolumeClaim{} + err = k8sClient.Get(context.TODO(), types.NamespacedName{Name: instance1.Name + "-snapshot-volume", Namespace: instance1.Namespace}, pvc) + require.True(t, k8sErrors.IsNotFound(err)) +} + +func Test_reconcileCacheSnapshotCronJobCreation(t *testing.T) { + ctx := context.TODO() + instance1 := newEmptyRpaasInstance() + instance1.Name = "instance-1" + + resources := []runtime.Object{} + scheme := newScheme() + corev1.AddToScheme(scheme) + batchv1beta1.AddToScheme(scheme) + + k8sClient := fake.NewFakeClientWithScheme(scheme, resources...) + reconciler := &ReconcileRpaasInstance{ + client: k8sClient, + scheme: newScheme(), + } + + plan := &v1alpha1.RpaasPlan{ + Spec: v1alpha1.RpaasPlanSpec{}, + } + + err := reconciler.reconcileCacheSnapshotCronJob(ctx, instance1, plan) + require.NoError(t, err) + + cronJob := &batchv1beta1.CronJob{} + err = k8sClient.Get(context.TODO(), types.NamespacedName{Name: instance1.Name + "-snapshot-cron-job", Namespace: instance1.Namespace}, cronJob) + require.NoError(t, err) + + assert.Equal(t, "RpaasInstance", cronJob.ObjectMeta.OwnerReferences[0].Kind) + assert.Equal(t, instance1.Name, cronJob.ObjectMeta.OwnerReferences[0].Name) + + assert.Equal(t, map[string]string{ + "rpaas.extensions.tsuru.io/instance-name": "instance-1", + "rpaas.extensions.tsuru.io/plan-name": "my-plan", + }, cronJob.ObjectMeta.Labels) + + assert.Equal(t, map[string]string{ + "log-app-name": "instance-1", + "log-process-name": "cache-synchronize", + "rpaas.extensions.tsuru.io/instance-name": "instance-1", + "rpaas.extensions.tsuru.io/plan-name": "my-plan", + }, cronJob.Spec.JobTemplate.Spec.Template.ObjectMeta.Labels) +} + +func Test_reconcileCacheSnapshotCronJobUpdate(t *testing.T) { + ctx := context.TODO() + instance1 := newEmptyRpaasInstance() + instance1.Name = "instance-1" + + previousCronJob := &batchv1beta1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance1.Name + "-snapshot-cronjob", + }, + Spec: batchv1beta1.CronJobSpec{ + Schedule: "old-schedule", + }, + } + + resources := []runtime.Object{previousCronJob} + scheme := newScheme() + corev1.AddToScheme(scheme) + batchv1beta1.AddToScheme(scheme) + + k8sClient := fake.NewFakeClientWithScheme(scheme, resources...) 
+ reconciler := &ReconcileRpaasInstance{ + client: k8sClient, + scheme: newScheme(), + } + + plan := &v1alpha1.RpaasPlan{ + Spec: v1alpha1.RpaasPlanSpec{ + Config: v1alpha1.NginxConfig{ + CacheSnapshotSync: v1alpha1.CacheSnapshotSyncSpec{ + Schedule: "new-schedule", + }, + }, + }, + } + + err := reconciler.reconcileCacheSnapshotCronJob(ctx, instance1, plan) + require.NoError(t, err) + + cronJob := &batchv1beta1.CronJob{} + err = k8sClient.Get(context.TODO(), types.NamespacedName{Name: instance1.Name + "-snapshot-cron-job", Namespace: instance1.Namespace}, cronJob) + require.NoError(t, err) + + assert.Equal(t, "RpaasInstance", cronJob.ObjectMeta.OwnerReferences[0].Kind) + assert.Equal(t, instance1.Name, cronJob.ObjectMeta.OwnerReferences[0].Name) + assert.Equal(t, "new-schedule", cronJob.Spec.Schedule) +} + +func Test_destroySnapshotCronJob(t *testing.T) { + ctx := context.TODO() + instance1 := newEmptyRpaasInstance() + instance1.Name = "instance-1" + + cronJob := &batchv1beta1.CronJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance1.Name + "-snapshot-cron-job", + Namespace: instance1.Namespace, + }, + } + + resources := []runtime.Object{cronJob} + scheme := newScheme() + corev1.AddToScheme(scheme) + batchv1beta1.AddToScheme(scheme) + + k8sClient := fake.NewFakeClientWithScheme(scheme, resources...) + reconciler := &ReconcileRpaasInstance{ + client: k8sClient, + scheme: newScheme(), + } + + err := reconciler.destroyCacheSnapshotCronJob(ctx, instance1) + require.NoError(t, err) + + cronJob = &batchv1beta1.CronJob{} + + err = k8sClient.Get(context.TODO(), types.NamespacedName{Name: instance1.Name + "-snapshot-cron-job", Namespace: instance1.Namespace}, cronJob) + require.True(t, k8sErrors.IsNotFound(err)) +} func int32Ptr(n int32) *int32 { return &n } +func strPtr(s string) *string { + return &s +} + func newEmptyRpaasInstance() *v1alpha1.RpaasInstance { return &v1alpha1.RpaasInstance{ TypeMeta: metav1.TypeMeta{ @@ -669,7 +950,9 @@ func newEmptyRpaasInstance() *v1alpha1.RpaasInstance { Name: "my-instance", Namespace: "default", }, - Spec: v1alpha1.RpaasInstanceSpec{}, + Spec: v1alpha1.RpaasInstanceSpec{ + PlanName: "my-plan", + }, } } @@ -1183,3 +1466,117 @@ func TestReconcileNginx_reconcilePorts(t *testing.T) { }) } } + +func TestReconcile(t *testing.T) { + rpaas := &v1alpha1.RpaasInstance{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-instance", + Namespace: "default", + }, + Spec: v1alpha1.RpaasInstanceSpec{ + PlanName: "my-plan", + }, + } + plan := &v1alpha1.RpaasPlan{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-plan", + Namespace: "default", + }, + Spec: v1alpha1.RpaasPlanSpec{ + Image: "tsuru:mynginx:test", + Config: v1alpha1.NginxConfig{ + CacheEnabled: v1alpha1.Bool(true), + CacheSize: resourceMustParsePtr("100M"), + CacheSnapshotEnabled: true, + CacheSnapshotStorage: v1alpha1.CacheSnapshotStorage{ + StorageClassName: strPtr("my-storage-class"), + }, + CachePath: "/var/cache/nginx/rpaas", + CacheSnapshotSync: v1alpha1.CacheSnapshotSyncSpec{ + Schedule: "1 * * * *", + Image: "test/test:latest", + CmdPodToPVC: []string{ + "/bin/bash", + "-c", + "echo 'this is a test'", + }, + CmdPVCToPod: []string{ + "/bin/bash", + "-c", + "echo 'this is a the first pod sync'", + }, + }, + }, + }, + } + resources := []runtime.Object{rpaas, plan} + scheme := newScheme() + corev1.AddToScheme(scheme) + batchv1.AddToScheme(scheme) + batchv1beta1.AddToScheme(scheme) + client := fake.NewFakeClientWithScheme(scheme, resources...) 
+ reconciler := &ReconcileRpaasInstance{ + client: client, + scheme: scheme, + } + result, err := reconciler.Reconcile(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: "default", Name: "my-instance"}}) + require.NoError(t, err) + + assert.Equal(t, result, reconcile.Result{}) + + nginx := &nginxv1alpha1.Nginx{} + err = client.Get(context.TODO(), types.NamespacedName{Name: rpaas.Name, Namespace: rpaas.Namespace}, nginx) + require.NoError(t, err) + + assert.Equal(t, "cache-snapshot-volume", nginx.Spec.PodTemplate.Volumes[0].Name) + assert.Equal(t, &corev1.PersistentVolumeClaimVolumeSource{ClaimName: "my-instance-snapshot-volume"}, nginx.Spec.PodTemplate.Volumes[0].PersistentVolumeClaim) + assert.Equal(t, "cache-snapshot-volume", nginx.Spec.PodTemplate.VolumeMounts[0].Name) + assert.Equal(t, "/var/cache/cache-snapshot", nginx.Spec.PodTemplate.VolumeMounts[0].MountPath) + + assert.Equal(t, resource.MustParse("100M"), *nginx.Spec.Cache.Size) + + initContainer := nginx.Spec.PodTemplate.InitContainers[0] + assert.Equal(t, "restore-snapshot", initContainer.Name) + assert.Equal(t, "tsuru:mynginx:test", initContainer.Image) + assert.Equal(t, "/bin/bash", initContainer.Command[0]) + assert.Equal(t, "-c", initContainer.Args[0]) + assert.Equal(t, "echo 'this is a the first pod sync'", initContainer.Args[1]) + assert.Equal(t, []corev1.EnvVar{ + {Name: "SERVICE_NAME", Value: "default"}, + {Name: "INSTANCE_NAME", Value: "my-instance"}, + {Name: "CACHE_SNAPSHOT_MOUNTPOINT", Value: "/var/cache/cache-snapshot"}, + {Name: "CACHE_PATH", Value: "/var/cache/nginx/rpaas"}, + {Name: "POD_CMD", Value: "rsync -avz --recursive --delete --temp-dir=/var/cache/nginx/rpaas/nginx_tmp /var/cache/cache-snapshot/nginx /var/cache/nginx/rpaas"}, + }, initContainer.Env) + + assert.Equal(t, []corev1.VolumeMount{ + {Name: "cache-snapshot-volume", MountPath: "/var/cache/cache-snapshot"}, + {Name: "cache-vol", MountPath: "/var/cache/nginx/rpaas"}, + }, initContainer.VolumeMounts) + + cronJob := &batchv1beta1.CronJob{} + err = client.Get(context.TODO(), types.NamespacedName{Name: "my-instance-snapshot-cron-job", Namespace: rpaas.Namespace}, cronJob) + require.NoError(t, err) + + assert.Equal(t, "1 * * * *", cronJob.Spec.Schedule) + podTemplateSpec := cronJob.Spec.JobTemplate.Spec.Template + podSpec := podTemplateSpec.Spec + assert.Equal(t, "test/test:latest", podSpec.Containers[0].Image) + assert.Equal(t, "/bin/bash", podSpec.Containers[0].Command[0]) + assert.Equal(t, "-c", podSpec.Containers[0].Args[0]) + assert.Equal(t, "echo 'this is a test'", podSpec.Containers[0].Args[1]) + assert.Equal(t, "my-instance", podTemplateSpec.ObjectMeta.Labels["log-app-name"]) + assert.Equal(t, "cache-synchronize", podTemplateSpec.ObjectMeta.Labels["log-process-name"]) + assert.Equal(t, []corev1.EnvVar{ + {Name: "SERVICE_NAME", Value: "default"}, + {Name: "INSTANCE_NAME", Value: "my-instance"}, + {Name: "CACHE_SNAPSHOT_MOUNTPOINT", Value: "/var/cache/cache-snapshot"}, + {Name: "CACHE_PATH", Value: "/var/cache/nginx/rpaas"}, + {Name: "POD_CMD", Value: "rsync -avz --recursive --delete --temp-dir=/var/cache/cache-snapshot/temp /var/cache/nginx/rpaas/nginx /var/cache/cache-snapshot"}, + }, podSpec.Containers[0].Env) +} + +func resourceMustParsePtr(fmt string) *resource.Quantity { + qty := resource.MustParse(fmt) + return &qty +} diff --git a/test/integration_test.go b/test/integration_test.go index 249100579..c89779caa 100644 --- a/test/integration_test.go +++ b/test/integration_test.go @@ -36,6 +36,16 @@ func TestMain(m *testing.M) { 
 	os.Exit(m.Run())
 }
 
+func assertInstanceContains(t *testing.T, localPort int, expectedStatus int, bodyPart string) {
+	rsp, iErr := http.Get(fmt.Sprintf("http://127.0.0.1:%d/", localPort))
+	require.NoError(t, iErr)
+	assert.Equal(t, expectedStatus, rsp.StatusCode)
+	defer rsp.Body.Close()
+	rawBody, iErr := ioutil.ReadAll(rsp.Body)
+	require.NoError(t, iErr)
+	assert.Contains(t, string(rawBody), bodyPart)
+}
+
 func Test_RpaasOperator(t *testing.T) {
 	t.Run("apply manifests at rpaas-full.yaml", func(t *testing.T) {
 		namespaceName := "rpaasoperator-full" + strconv.Itoa(rand.Int())
@@ -50,7 +60,7 @@ func Test_RpaasOperator(t *testing.T) {
 		nginx, err := getReadyNginx("my-instance", namespaceName, 2, 1)
 		require.NoError(t, err)
 		assert.Equal(t, int32(2), *nginx.Spec.Replicas)
-		assert.Equal(t, "tsuru/nginx-tsuru:1.15.0", nginx.Spec.Image)
+		assert.Equal(t, "tsuru/nginx-tsuru:1.16.1", nginx.Spec.Image)
 		assert.Equal(t, "/_nginx_healthcheck", nginx.Spec.HealthcheckPath)
 		assert.Len(t, nginx.Status.Pods, 2)
 		for _, podStatus := range nginx.Status.Pods {
@@ -176,7 +186,7 @@ func Test_RpaasApi(t *testing.T) {
 		require.NoError(t, err)
 		require.NotNil(t, nginx)
 		assert.Equal(t, int32(1), *nginx.Spec.Replicas)
-		assert.Equal(t, "tsuru/nginx-tsuru:1.15.0", nginx.Spec.Image)
+		assert.Equal(t, "tsuru/nginx-tsuru:1.16.1", nginx.Spec.Image)
 		assert.Equal(t, "/_nginx_healthcheck", nginx.Spec.HealthcheckPath)
 
 		nginxService := &corev1.Service{
@@ -213,22 +223,13 @@ func Test_RpaasApi(t *testing.T) {
 		_, err = kubectl("wait", "--for=condition=Ready", "-l", "app=hello", "pod", "--timeout", "2m", "-n", namespaceName)
 		require.NoError(t, err)
 
-		assertInstanceReturns := func(localPort int, expectedBody string) {
-			rsp, iErr := http.Get(fmt.Sprintf("http://127.0.0.1:%d/", localPort))
-			require.NoError(t, iErr)
-			defer rsp.Body.Close()
-			rawBody, iErr := ioutil.ReadAll(rsp.Body)
-			require.NoError(t, iErr)
-			assert.Equal(t, expectedBody, string(rawBody))
-		}
-
 		serviceName := fmt.Sprintf("svc/%s-service", instanceName)
 		servicePort := "80"
 
 		ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
 		defer cancel()
 		err = portForward(ctx, namespaceName, serviceName, servicePort, func(localPort int) {
-			assertInstanceReturns(localPort, "instance not bound\n")
+			assertInstanceContains(t, localPort, http.StatusNotFound, "instance not bound")
 		})
 		require.NoError(t, err)
@@ -244,7 +245,7 @@ func Test_RpaasApi(t *testing.T) {
 		ctx, cancel = context.WithTimeout(context.Background(), time.Minute)
 		defer cancel()
 		err = portForward(ctx, namespaceName, serviceName, servicePort, func(localPort int) {
-			assertInstanceReturns(localPort, "Hello World!")
+			assertInstanceContains(t, localPort, http.StatusOK, "CLIENT VALUES")
 		})
 		require.NoError(t, err)
@@ -259,7 +260,7 @@ func Test_RpaasApi(t *testing.T) {
 		ctx, cancel = context.WithTimeout(context.Background(), time.Minute)
 		defer cancel()
 		err = portForward(ctx, namespaceName, serviceName, servicePort, func(localPort int) {
-			assertInstanceReturns(localPort, "instance not bound\n")
+			assertInstanceContains(t, localPort, http.StatusNotFound, "instance not bound")
 		})
 		require.NoError(t, err)
 	})
@@ -294,22 +295,13 @@ func Test_RpaasApi(t *testing.T) {
 		_, err = kubectl("wait", "--for=condition=Ready", "-l", "app=echo-server", "pod", "--timeout", "2m", "-n", namespaceName)
 		require.NoError(t, err)
 
-		assertInstanceReturns := func(localPort int, expectedBody string) {
-			rsp, iErr := http.Get(fmt.Sprintf("http://127.0.0.1:%d/", localPort))
-			require.NoError(t, iErr)
-			defer rsp.Body.Close()
-			rawBody, iErr := ioutil.ReadAll(rsp.Body)
-			require.NoError(t, iErr)
-			assert.Equal(t, expectedBody, string(rawBody))
-		}
-
 		serviceName := fmt.Sprintf("svc/%s-service", instanceName)
 		servicePort := "80"
 
 		ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
 		defer cancel()
 		err = portForward(ctx, namespaceName, serviceName, servicePort, func(localPort int) {
-			assertInstanceReturns(localPort, "instance not bound\n")
+			assertInstanceContains(t, localPort, http.StatusNotFound, "instance not bound")
 		})
 		require.NoError(t, err)
@@ -328,7 +320,7 @@ func Test_RpaasApi(t *testing.T) {
 		ctx, cancel = context.WithTimeout(context.Background(), time.Minute)
 		defer cancel()
 		err = portForward(ctx, namespaceName, serviceName, servicePort, func(localPort int) {
-			assertInstanceReturns(localPort, "Hello World!")
+			assertInstanceContains(t, localPort, http.StatusOK, "CLIENT VALUES")
 		})
 		require.NoError(t, err)
@@ -340,16 +332,10 @@ func Test_RpaasApi(t *testing.T) {
 		_, err = getReadyNginx(instanceName, namespaceName, 1, 1)
 		require.NoError(t, err)
 
-		assertInstanceReturnsStatusCode := func(localPort int, expectedCode int) {
-			rsp, iErr := http.Get(fmt.Sprintf("http://127.0.0.1:%d/", localPort))
-			require.NoError(t, iErr)
-			assert.Equal(t, expectedCode, rsp.StatusCode)
-		}
-
 		ctx, cancel = context.WithTimeout(context.Background(), time.Minute)
 		defer cancel()
 		err = portForward(ctx, namespaceName, serviceName, servicePort, func(localPort int) {
-			assertInstanceReturnsStatusCode(localPort, http.StatusOK)
+			assertInstanceContains(t, localPort, http.StatusOK, "")
 		})
 		require.NoError(t, err)
@@ -364,7 +350,7 @@ func Test_RpaasApi(t *testing.T) {
 		ctx, cancel = context.WithTimeout(context.Background(), time.Minute)
 		defer cancel()
 		err = portForward(ctx, namespaceName, serviceName, servicePort, func(localPort int) {
-			assertInstanceReturns(localPort, "instance not bound\n")
+			assertInstanceContains(t, localPort, http.StatusNotFound, "instance not bound")
 		})
 		require.NoError(t, err)
 
diff --git a/test/testdata/hello-app.yaml b/test/testdata/hello-app.yaml
index 4dd44636c..13e27bcaf 100644
--- a/test/testdata/hello-app.yaml
+++ b/test/testdata/hello-app.yaml
@@ -13,7 +13,7 @@ spec:
     spec:
       containers:
       - name: hello
-        image: gcr.io/hello-minikube-zero-install/hello-node
+        image: k8s.gcr.io/echoserver:1.4
 ---
 apiVersion: v1
 kind: Service
diff --git a/test/testdata/rpaas-full.yaml b/test/testdata/rpaas-full.yaml
index 2e740ef99..942f46dbe 100644
--- a/test/testdata/rpaas-full.yaml
+++ b/test/testdata/rpaas-full.yaml
@@ -3,7 +3,7 @@ kind: RpaasPlan
 metadata:
   name: basic
 spec:
-  image: tsuru/nginx-tsuru:1.15.0
+  image: tsuru/nginx-tsuru:1.16.1
   config: {}
   resources:
     limits:
diff --git a/test/testdata/rpaasplan-basic.yaml b/test/testdata/rpaasplan-basic.yaml
index f7aaad28a..d0c45ecd9 100644
--- a/test/testdata/rpaasplan-basic.yaml
+++ b/test/testdata/rpaasplan-basic.yaml
@@ -3,7 +3,7 @@ kind: RpaasPlan
 metadata:
   name: basic
 spec:
-  image: tsuru/nginx-tsuru:1.15.0
+  image: tsuru/nginx-tsuru:1.16.1
   config: {}
   resources:
     requests:
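
The controller tests above call destroyCacheSnapshotCronJob, whose implementation is not part of this diff. A minimal sketch of how such a method could look, assuming the package name, that ReconcileRpaasInstance holds a controller-runtime client and scheme in its client/scheme fields (as the test constructors suggest), and the "<instance>-snapshot-cron-job" naming the tests assert on:

package rpaasinstance // package name assumed for this sketch

import (
	"context"

	batchv1beta1 "k8s.io/api/batch/v1beta1"
	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/tsuru/rpaas-operator/pkg/apis/extensions/v1alpha1"
)

// ReconcileRpaasInstance mirrors only the fields the tests rely on;
// the real struct lives in the operator package.
type ReconcileRpaasInstance struct {
	client client.Client
	scheme *runtime.Scheme
}

// destroyCacheSnapshotCronJob deletes the "<instance>-snapshot-cron-job"
// CronJob and treats a missing object as success, so the call is idempotent.
func (r *ReconcileRpaasInstance) destroyCacheSnapshotCronJob(ctx context.Context, instance *v1alpha1.RpaasInstance) error {
	cronJob := &batchv1beta1.CronJob{}
	name := types.NamespacedName{
		Name:      instance.Name + "-snapshot-cron-job",
		Namespace: instance.Namespace,
	}
	err := r.client.Get(ctx, name, cronJob)
	if k8sErrors.IsNotFound(err) {
		// Nothing to clean up; matches the IsNotFound expectation in the test.
		return nil
	}
	if err != nil {
		return err
	}
	return r.client.Delete(ctx, cronJob)
}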
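
Under the same assumptions (same imports plus metav1 and controller-runtime's controllerutil), a sketch of the reconcile counterpart. It only covers what the tests assert directly, i.e. the CronJob name, the owner reference pointing at the RpaasInstance, and the schedule taken from the plan; the job pod template (image, command, env vars, labels) is deliberately left out here:

// reconcileCacheSnapshotCronJob creates or updates the snapshot CronJob
// for the given instance, using the schedule from the plan.
func (r *ReconcileRpaasInstance) reconcileCacheSnapshotCronJob(ctx context.Context, instance *v1alpha1.RpaasInstance, plan *v1alpha1.RpaasPlan) error {
	desired := &batchv1beta1.CronJob{
		ObjectMeta: metav1.ObjectMeta{
			Name:      instance.Name + "-snapshot-cron-job",
			Namespace: instance.Namespace,
		},
		Spec: batchv1beta1.CronJobSpec{
			Schedule: plan.Spec.Config.CacheSnapshotSync.Schedule,
			// JobTemplate (container image, command, env vars, log labels)
			// omitted in this sketch.
		},
	}
	// Sets the RpaasInstance as controller owner, which is what the test's
	// OwnerReferences assertions check.
	if err := controllerutil.SetControllerReference(instance, desired, r.scheme); err != nil {
		return err
	}

	existing := &batchv1beta1.CronJob{}
	err := r.client.Get(ctx, types.NamespacedName{Name: desired.Name, Namespace: desired.Namespace}, existing)
	if k8sErrors.IsNotFound(err) {
		return r.client.Create(ctx, desired)
	}
	if err != nil {
		return err
	}
	existing.Spec = desired.Spec
	return r.client.Update(ctx, existing)
}

The get-then-create-or-update shape keeps the method safe to call on every reconcile loop, which is why the test can run it against a fake client that may or may not already contain the CronJob.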