diff --git a/cmd/managementapi/wire.go b/cmd/managementapi/wire.go index e6176d244..2ac490c74 100644 --- a/cmd/managementapi/wire.go +++ b/cmd/managementapi/wire.go @@ -53,6 +53,7 @@ func initializeManagementMux(ctx context.Context, conf config.Config) (*runtime. providers.ProvideDefinitionConstructors, // services + service.NewSchedulerManagerConfig, service.NewSchedulerManager, service.NewOperationManager, diff --git a/cmd/managementapi/wire_gen.go b/cmd/managementapi/wire_gen.go index 8624d92dc..22532f956 100644 --- a/cmd/managementapi/wire_gen.go +++ b/cmd/managementapi/wire_gen.go @@ -50,7 +50,11 @@ func initializeManagementMux(ctx context.Context, conf config.Config) (*runtime. if err != nil { return nil, err } - schedulerManager := service.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManagerConfig, err := service.NewSchedulerManagerConfig(conf) + if err != nil { + return nil, err + } + schedulerManager := service.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, schedulerManagerConfig) schedulersHandler := handlers.ProvideSchedulersHandler(schedulerManager) operationsHandler := handlers.ProvideOperationsHandler(operationManager) serveMux := provideManagementMux(ctx, schedulersHandler, operationsHandler) diff --git a/cmd/runtimewatcher/wire.go b/cmd/runtimewatcher/wire.go index a6272fb67..f7639ff5e 100644 --- a/cmd/runtimewatcher/wire.go +++ b/cmd/runtimewatcher/wire.go @@ -31,7 +31,6 @@ import ( "github.com/topfreegames/maestro/internal/core/services/events" "github.com/topfreegames/maestro/internal/core/services/workers" "github.com/topfreegames/maestro/internal/core/worker" - workerconfigs "github.com/topfreegames/maestro/internal/core/worker/config" "github.com/topfreegames/maestro/internal/core/worker/runtimewatcher" "github.com/topfreegames/maestro/internal/service" ) @@ -43,24 +42,16 @@ func provideRuntimeWatcherBuilder() *worker.WorkerBuilder { } } -func provideRuntimeWatcherConfig(c config.Config) *workerconfigs.RuntimeWatcherConfig { - return &workerconfigs.RuntimeWatcherConfig{ - DisruptionWorkerIntervalSeconds: c.GetDuration("runtimeWatcher.disruptionWorker.intervalSeconds"), - DisruptionSafetyPercentage: c.GetFloat64("runtimeWatcher.disruptionWorker.safetyPercentage"), - } -} - var WorkerOptionsSet = wire.NewSet( service.NewRuntimeKubernetes, - service.NewRoomStorageRedis, RoomManagerSet, - provideRuntimeWatcherConfig, - wire.Struct(new(worker.WorkerOptions), "Runtime", "RoomStorage", "RoomManager", "RuntimeWatcherConfig")) + wire.Struct(new(worker.WorkerOptions), "RoomManager", "Runtime")) var RoomManagerSet = wire.NewSet( service.NewSchedulerStoragePg, service.NewClockTime, service.NewPortAllocatorRandom, + service.NewRoomStorageRedis, service.NewGameRoomInstanceStorageRedis, service.NewSchedulerCacheRedis, service.NewRoomManagerConfig, diff --git a/cmd/runtimewatcher/wire_gen.go b/cmd/runtimewatcher/wire_gen.go index c9e33e7be..67e028115 100644 --- a/cmd/runtimewatcher/wire_gen.go +++ b/cmd/runtimewatcher/wire_gen.go @@ -12,7 +12,6 @@ import ( "github.com/topfreegames/maestro/internal/core/services/events" "github.com/topfreegames/maestro/internal/core/services/workers" "github.com/topfreegames/maestro/internal/core/worker" - config2 "github.com/topfreegames/maestro/internal/core/worker/config" "github.com/topfreegames/maestro/internal/core/worker/runtimewatcher" "github.com/topfreegames/maestro/internal/service" ) @@ -25,7 +24,8 @@ func initializeRuntimeWatcher(c config.Config) 
(*workers.WorkersManager, error) if err != nil { return nil, err } - runtime, err := service.NewRuntimeKubernetes(c) + clock := service.NewClockTime() + portAllocator, err := service.NewPortAllocatorRandom(c) if err != nil { return nil, err } @@ -33,12 +33,11 @@ func initializeRuntimeWatcher(c config.Config) (*workers.WorkersManager, error) if err != nil { return nil, err } - clock := service.NewClockTime() - portAllocator, err := service.NewPortAllocatorRandom(c) + gameRoomInstanceStorage, err := service.NewGameRoomInstanceStorageRedis(c) if err != nil { return nil, err } - gameRoomInstanceStorage, err := service.NewGameRoomInstanceStorageRedis(c) + runtime, err := service.NewRuntimeKubernetes(c) if err != nil { return nil, err } @@ -60,12 +59,9 @@ func initializeRuntimeWatcher(c config.Config) (*workers.WorkersManager, error) return nil, err } roomManager := service.NewRoomManager(clock, portAllocator, roomStorage, gameRoomInstanceStorage, runtime, eventsService, roomManagerConfig) - runtimeWatcherConfig := provideRuntimeWatcherConfig(c) workerOptions := &worker.WorkerOptions{ - Runtime: runtime, - RoomStorage: roomStorage, - RoomManager: roomManager, - RuntimeWatcherConfig: runtimeWatcherConfig, + RoomManager: roomManager, + Runtime: runtime, } workersManager := workers.NewWorkersManager(workerBuilder, c, schedulerStorage, workerOptions) return workersManager, nil @@ -80,14 +76,6 @@ func provideRuntimeWatcherBuilder() *worker.WorkerBuilder { } } -func provideRuntimeWatcherConfig(c config.Config) *config2.RuntimeWatcherConfig { - return &config2.RuntimeWatcherConfig{ - DisruptionWorkerIntervalSeconds: c.GetDuration("runtimeWatcher.disruptionWorker.intervalSeconds"), - DisruptionSafetyPercentage: c.GetFloat64("runtimeWatcher.disruptionWorker.safetyPercentage"), - } -} - -var WorkerOptionsSet = wire.NewSet(service.NewRuntimeKubernetes, service.NewRoomStorageRedis, RoomManagerSet, - provideRuntimeWatcherConfig, wire.Struct(new(worker.WorkerOptions), "Runtime", "RoomStorage", "RoomManager", "RuntimeWatcherConfig")) +var WorkerOptionsSet = wire.NewSet(service.NewRuntimeKubernetes, RoomManagerSet, wire.Struct(new(worker.WorkerOptions), "RoomManager", "Runtime")) -var RoomManagerSet = wire.NewSet(service.NewSchedulerStoragePg, service.NewClockTime, service.NewPortAllocatorRandom, service.NewGameRoomInstanceStorageRedis, service.NewSchedulerCacheRedis, service.NewRoomManagerConfig, service.NewRoomManager, service.NewEventsForwarder, events.NewEventsForwarderService, service.NewEventsForwarderServiceConfig) +var RoomManagerSet = wire.NewSet(service.NewSchedulerStoragePg, service.NewClockTime, service.NewPortAllocatorRandom, service.NewRoomStorageRedis, service.NewGameRoomInstanceStorageRedis, service.NewSchedulerCacheRedis, service.NewRoomManagerConfig, service.NewRoomManager, service.NewEventsForwarder, events.NewEventsForwarderService, service.NewEventsForwarderServiceConfig) diff --git a/cmd/worker/wire.go b/cmd/worker/wire.go index dfa4f11a9..ee693ccaf 100644 --- a/cmd/worker/wire.go +++ b/cmd/worker/wire.go @@ -70,6 +70,7 @@ func initializeWorker(c config.Config, builder *worker.WorkerBuilder) (*workerss worker.ProvideWorkerOptions, workersservice.NewWorkersManager, service.NewSchedulerManager, + service.NewSchedulerManagerConfig, ) return &workersservice.WorkersManager{}, nil diff --git a/cmd/worker/wire_gen.go b/cmd/worker/wire_gen.go index b6de22f7d..ea1da74ad 100644 --- a/cmd/worker/wire_gen.go +++ b/cmd/worker/wire_gen.go @@ -75,7 +75,11 @@ func initializeWorker(c config.Config, builder 
*worker.WorkerBuilder) (*workers. return nil, err } roomManager := service.NewRoomManager(clock, portAllocator, roomStorage, gameRoomInstanceStorage, runtime, eventsService, roomManagerConfig) - schedulerManager := service.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManagerConfig, err := service.NewSchedulerManagerConfig(c) + if err != nil { + return nil, err + } + schedulerManager := service.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, schedulerManagerConfig) policyMap := service.NewPolicyMap(roomStorage) autoscaler := service.NewAutoscaler(policyMap) newversionConfig := service.NewCreateSchedulerVersionConfig(c) diff --git a/config/config.yaml b/config/config.yaml index c1d1ba268..bdcc71bfb 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -74,6 +74,8 @@ operations: limit: 1000 services: + schedulerManager: + defaultPdbMaxUnavailable: "5%" roomManager: roomPingTimeoutMillis: 240000 roomInitializationTimeoutMillis: 120000 diff --git a/docs/reference/Scheduler.md b/docs/reference/Scheduler.md index d30f742a9..d85e393de 100644 --- a/docs/reference/Scheduler.md +++ b/docs/reference/Scheduler.md @@ -203,7 +203,8 @@ autoscaling: "metadata": {} } } - ] + ], + "pdbMaxUnavailable": "5%", "autoscaling": { "enabled": true, "min": 10, @@ -234,6 +235,7 @@ forwarders: Forwarders autoscaling: Autoscaling spec: Spec annotation: Map +pdbMaxUnavailable: String ``` - **Name**: Scheduler name. This name is unique and will be the same name used for the kubernetes namespace. It's @@ -254,6 +256,7 @@ annotation: Map used by them, limits and images. More info [here](#spec). - **annotations**: Allows annotations for the scheduler's game room. Know more about annotations on Kubernetes [here](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations) +- **pdbMaxUnavailable**: Defines the disruption budget for the scheduler's game rooms. Optional; defaults to 5%. The value can be either a percentage of rooms between 0 and 100, e.g. "15%", or an absolute number of rooms, e.g. "100". ### PortRange The **PortRange** is used to select a random port for a GRU between **start** and **end**. @@ -403,4 +406,43 @@ It is represented as: - **name**: Name of the port. Facilitates on recognition; - **protocol**: Port protocol. Can be UDP, TCP or SCTP.; - **port**: The port exposed. -- **hostPortRange**: The [port range](#portrange) for the port to be allocated in the host. Mutually exclusive with the port range configured in the root structure. \ No newline at end of file +- **hostPortRange**: The [port range](#portrange) for the port to be allocated in the host. Mutually exclusive with the port range configured in the root structure. + +#### PDB Max Unavailable + +A string value that defines the disruption budget for the Game Rooms of a specific scheduler. +Maestro will create a [PDB Resource](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) +to prevent evictions from drastically impacting the availability of the Game Rooms. + +By default this value is set to 5%, so at worst the runtime can evict 5% of the pods. There is no way to control +which pods will be evicted (e.g. whether ready or pending pods are preferred). + +The configuration can be specified with this order of precedence: + +1. Value specified in the Scheduler's definition + +```json +{ + "pdbMaxUnavailable": "10%" +} +``` + +2. Value specified in the ENV VAR: + +```shell +MAESTRO_SERVICES_SCHEDULERMANAGER_DEFAULTPDBMAXUNAVAILABLE="10%" +``` + +3.
Value specified in the [config.yaml](../../config/config.yaml): + +```yaml +services: + schedulerManager: + defaultPdbMaxUnavailable: "5%" +``` + +4. Value specified in [code](../../internal/core/entities/pdb/pdb.go) that defaults to 5%: + +```go +const DefaultPdbMaxUnavailablePercentage = "5%" +``` diff --git a/internal/adapters/runtime/kubernetes/scheduler.go b/internal/adapters/runtime/kubernetes/scheduler.go index 2ac5ee082..8b051b8e3 100644 --- a/internal/adapters/runtime/kubernetes/scheduler.go +++ b/internal/adapters/runtime/kubernetes/scheduler.go @@ -27,19 +27,18 @@ import ( "strconv" "github.com/topfreegames/maestro/internal/core/entities" + pdbEntity "github.com/topfreegames/maestro/internal/core/entities/pdb" "github.com/topfreegames/maestro/internal/core/ports/errors" "go.uber.org/zap" v1 "k8s.io/api/core/v1" v1Policy "k8s.io/api/policy/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" ) const ( - DefaultDisruptionSafetyPercentage float64 = 0.05 - MajorKubeVersionPDB int = 1 - MinorKubeVersionPDB int = 21 + MajorKubeVersionPDB int = 1 + MinorKubeVersionPDB int = 21 ) func (k *kubernetes) isPDBSupported() bool { @@ -96,10 +95,7 @@ func (k *kubernetes) createPDBFromScheduler(ctx context.Context, scheduler *enti }, }, Spec: v1Policy.PodDisruptionBudgetSpec{ - MinAvailable: &intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(0), - }, + MaxUnavailable: pdbEntity.ConvertStrToSpec(scheduler.PdbMaxUnavailable), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "maestro-scheduler": scheduler.Name, @@ -173,29 +169,8 @@ func (k *kubernetes) DeleteScheduler(ctx context.Context, scheduler *entities.Sc return nil } -func (k *kubernetes) MitigateDisruption( - ctx context.Context, - scheduler *entities.Scheduler, - roomAmount int, - safetyPercentage float64, -) error { - if scheduler == nil { - return errors.NewErrInvalidArgument("empty pointer received for scheduler, can not mitigate disruptions") - } - - incSafetyPercentage := 1.0 - if safetyPercentage < DefaultDisruptionSafetyPercentage { - k.logger.Warn( - "invalid safety percentage, using default percentage", - zap.Float64("safetyPercentage", safetyPercentage), - zap.Float64("DefaultDisruptionSafetyPercentage", DefaultDisruptionSafetyPercentage), - ) - safetyPercentage = DefaultDisruptionSafetyPercentage - } - incSafetyPercentage += safetyPercentage - - // For kubernetes mitigating disruptions means updating the current PDB - // minAvailable to the number of occupied rooms if above a threshold +func (k *kubernetes) UpdateScheduler(ctx context.Context, scheduler *entities.Scheduler) error { + // Check if PDB exists, if not, create it pdb, err := k.clientSet.PolicyV1().PodDisruptionBudgets(scheduler.Name).Get(ctx, scheduler.Name, metav1.GetOptions{}) if err != nil && !kerrors.IsNotFound(err) { // Non-recoverable errors @@ -209,31 +184,8 @@ func (k *kubernetes) MitigateDisruption( } } - var currentPdbMinAvailable int32 - // PDB might exist and is based on MaxUnavailable - if pdb.Spec.MinAvailable != nil { - currentPdbMinAvailable = pdb.Spec.MinAvailable.IntVal - } - - if currentPdbMinAvailable == int32(float64(roomAmount)*incSafetyPercentage) { - return nil - } - - // In theory, the PDB object can be changed in the runtime in the meantime after - // fetching initial state/ask for creation (beginning of the function) and before - // updating the value. 
This should never happen in production because there is only - // one agent setting this PDB in the namespace and it's the worker. However, on tests - // we were seeing intermittent failures running parallel cases, hence why adding this - // code it is safer to update the PDB object - pdb, err = k.clientSet.PolicyV1().PodDisruptionBudgets(scheduler.Name).Get(ctx, scheduler.Name, metav1.GetOptions{}) - if err != nil || pdb == nil { - return errors.NewErrUnexpected("non recoverable error when getting PDB for scheduler '%s': %s", scheduler.Name, err) - } pdb.Spec = v1Policy.PodDisruptionBudgetSpec{ - MinAvailable: &intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(float64(roomAmount) * incSafetyPercentage), - }, + MaxUnavailable: pdbEntity.ConvertStrToSpec(scheduler.PdbMaxUnavailable), Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "maestro-scheduler": scheduler.Name, @@ -243,7 +195,7 @@ func (k *kubernetes) MitigateDisruption( _, err = k.clientSet.PolicyV1().PodDisruptionBudgets(scheduler.Name).Update(ctx, pdb, metav1.UpdateOptions{}) if err != nil { - return errors.NewErrUnexpected("error updating PDB to mitigate disruptions for scheduler '%s': %s", scheduler.Name, err) + return errors.NewErrUnexpected("error updating PDB for scheduler '%s': %s", scheduler.Name, err) } return nil diff --git a/internal/adapters/runtime/kubernetes/scheduler_test.go b/internal/adapters/runtime/kubernetes/scheduler_test.go index 1bd065c7d..c80fff3f1 100644 --- a/internal/adapters/runtime/kubernetes/scheduler_test.go +++ b/internal/adapters/runtime/kubernetes/scheduler_test.go @@ -28,18 +28,14 @@ package kubernetes import ( "context" "testing" - "time" "github.com/stretchr/testify/require" "github.com/topfreegames/maestro/internal/core/entities" - "github.com/topfreegames/maestro/internal/core/entities/autoscaling" "github.com/topfreegames/maestro/internal/core/ports/errors" "github.com/topfreegames/maestro/test" v1 "k8s.io/api/core/v1" - v1Policy "k8s.io/api/policy/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" ) func TestSchedulerCreation(t *testing.T) { @@ -48,7 +44,7 @@ func TestSchedulerCreation(t *testing.T) { kubernetesRuntime := New(client) t.Run("create single scheduler", func(t *testing.T) { - scheduler := &entities.Scheduler{Name: "single-scheduler-test"} + scheduler := &entities.Scheduler{Name: "single-scheduler-test", PdbMaxUnavailable: "5%"} err := kubernetesRuntime.CreateScheduler(ctx, scheduler) require.NoError(t, err) @@ -57,7 +53,7 @@ func TestSchedulerCreation(t *testing.T) { }) t.Run("fail to create scheduler with the same name", func(t *testing.T) { - scheduler := &entities.Scheduler{Name: "conflict-scheduler-test"} + scheduler := &entities.Scheduler{Name: "conflict-scheduler-test", PdbMaxUnavailable: "5%"} err := kubernetesRuntime.CreateScheduler(ctx, scheduler) require.NoError(t, err) @@ -73,7 +69,7 @@ func TestSchedulerDeletion(t *testing.T) { kubernetesRuntime := New(client) t.Run("delete scheduler", func(t *testing.T) { - scheduler := &entities.Scheduler{Name: "delete-scheduler-test"} + scheduler := &entities.Scheduler{Name: "delete-scheduler-test", PdbMaxUnavailable: "5%"} err := kubernetesRuntime.CreateScheduler(ctx, scheduler) require.NoError(t, err) @@ -86,7 +82,7 @@ func TestSchedulerDeletion(t *testing.T) { }) t.Run("fail to delete inexistent scheduler", func(t *testing.T) { - scheduler := &entities.Scheduler{Name: "delete-inexistent-scheduler-test"} + scheduler := 
&entities.Scheduler{Name: "delete-inexistent-scheduler-test", PdbMaxUnavailable: "5%"} err := kubernetesRuntime.DeleteScheduler(ctx, scheduler) require.Error(t, err) require.ErrorIs(t, err, errors.ErrNotFound) @@ -104,7 +100,7 @@ func TestPDBCreationAndDeletion(t *testing.T) { t.SkipNow() } - scheduler := &entities.Scheduler{Name: "scheduler-pdb-test-no-autoscaling"} + scheduler := &entities.Scheduler{Name: "scheduler-pdb-test-no-autoscaling", PdbMaxUnavailable: "5%"} err := kubernetesRuntime.CreateScheduler(ctx, scheduler) if err != nil { require.ErrorIs(t, errors.ErrAlreadyExists, err) @@ -123,61 +119,20 @@ func TestPDBCreationAndDeletion(t *testing.T) { require.NotNil(t, pdb.Spec) require.NotNil(t, pdb.Spec.Selector) require.Equal(t, pdb.Name, scheduler.Name) - require.Equal(t, pdb.Spec.MinAvailable.IntVal, int32(0)) + require.Equal(t, pdb.Spec.MaxUnavailable.StrVal, "5%") require.Contains(t, pdb.Spec.Selector.MatchLabels, "maestro-scheduler") require.Contains(t, pdb.Spec.Selector.MatchLabels["maestro-scheduler"], scheduler.Name) require.Contains(t, pdb.Labels, "app.kubernetes.io/managed-by") require.Contains(t, pdb.Labels["app.kubernetes.io/managed-by"], "maestro") }) - t.Run("pdb should not use scheduler min as minAvailable", func(t *testing.T) { - if !kubernetesRuntime.isPDBSupported() { - t.Log("Kubernetes version does not support PDB, skipping") - t.SkipNow() - } - - scheduler := &entities.Scheduler{ - Name: "scheduler-pdb-test-with-autoscaling", - Autoscaling: &autoscaling.Autoscaling{ - Enabled: true, - Min: 2, - Max: 3, - Policy: autoscaling.Policy{ - Type: autoscaling.RoomOccupancy, - Parameters: autoscaling.PolicyParameters{ - RoomOccupancy: &autoscaling.RoomOccupancyParams{ - ReadyTarget: 0.1, - }, - }, - }, - }, - } - err := kubernetesRuntime.CreateScheduler(ctx, scheduler) - if err != nil { - require.ErrorIs(t, errors.ErrAlreadyExists, err) - } - - defer func() { - err := kubernetesRuntime.DeleteScheduler(ctx, scheduler) - if err != nil { - require.ErrorIs(t, errors.ErrNotFound, err) - } - }() - - pdb, err := client.PolicyV1().PodDisruptionBudgets(scheduler.Name).Get(ctx, scheduler.Name, metav1.GetOptions{}) - require.NoError(t, err) - require.NotNil(t, pdb) - require.Equal(t, pdb.Name, scheduler.Name) - require.Equal(t, pdb.Spec.MinAvailable.IntVal, int32(0)) - }) - t.Run("delete pdb on scheduler deletion", func(t *testing.T) { if !kubernetesRuntime.isPDBSupported() { t.Log("Kubernetes version does not support PDB, skipping") t.SkipNow() } - scheduler := &entities.Scheduler{Name: "scheduler-pdb-test-delete"} + scheduler := &entities.Scheduler{Name: "scheduler-pdb-test-delete", PdbMaxUnavailable: "5%"} err := kubernetesRuntime.CreateScheduler(ctx, scheduler) if err != nil { require.ErrorIs(t, errors.ErrAlreadyExists, err) @@ -187,7 +142,6 @@ func TestPDBCreationAndDeletion(t *testing.T) { require.NoError(t, err) require.NotNil(t, pdb) require.Equal(t, pdb.Name, scheduler.Name) - require.Equal(t, pdb.Spec.MinAvailable.IntVal, int32(0)) err = kubernetesRuntime.DeleteScheduler(ctx, scheduler) if err != nil { @@ -198,193 +152,3 @@ func TestPDBCreationAndDeletion(t *testing.T) { require.True(t, kerrors.IsNotFound(err)) }) } - -func TestMitigateDisruption(t *testing.T) { - ctx := context.Background() - client := test.GetKubernetesClientSet(t, kubernetesContainer) - kubernetesRuntime := New(client) - - t.Run("should not mitigate disruption if scheduler is nil", func(t *testing.T) { - err := kubernetesRuntime.MitigateDisruption(ctx, nil, 0, 0.0) - require.ErrorIs(t, 
errors.ErrInvalidArgument, err) - }) - - t.Run("should create PDB on mitigatation if not created before", func(t *testing.T) { - if !kubernetesRuntime.isPDBSupported() { - t.Log("Kubernetes version does not support PDB, skipping") - t.SkipNow() - } - - scheduler := &entities.Scheduler{Name: "scheduler-pdb-mitigation-create"} - namespace := &v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: scheduler.Name, - }, - } - - _, err := client.CoreV1().Namespaces().Create(ctx, namespace, metav1.CreateOptions{}) - require.NoError(t, err) - - err = kubernetesRuntime.MitigateDisruption(ctx, scheduler, 0, 0.0) - require.NoError(t, err) - - pdb, err := client.PolicyV1().PodDisruptionBudgets(scheduler.Name).Get(ctx, scheduler.Name, metav1.GetOptions{}) - require.NoError(t, err) - require.NotNil(t, pdb) - require.Equal(t, pdb.Name, scheduler.Name) - require.Equal(t, pdb.Spec.MinAvailable.IntVal, int32(0)) - }) - - t.Run("should update PDB on mitigation if not equal to current value", func(t *testing.T) { - if !kubernetesRuntime.isPDBSupported() { - t.Log("Kubernetes version does not support PDB, skipping") - t.SkipNow() - } - - scheduler := &entities.Scheduler{ - Name: "scheduler-pdb-mitigation-update", - } - err := kubernetesRuntime.CreateScheduler(ctx, scheduler) - if err != nil { - require.ErrorIs(t, errors.ErrAlreadyExists, err) - } - - defer func() { - err := kubernetesRuntime.DeleteScheduler(ctx, scheduler) - if err != nil { - require.ErrorIs(t, errors.ErrNotFound, err) - } - }() - - pdb, err := client.PolicyV1().PodDisruptionBudgets(scheduler.Name).Get(ctx, scheduler.Name, metav1.GetOptions{}) - require.NoError(t, err) - require.NotNil(t, pdb) - require.Equal(t, pdb.Name, scheduler.Name) - require.Equal(t, pdb.Spec.MinAvailable.IntVal, int32(0)) - - occupiedRooms := 100 - err = kubernetesRuntime.MitigateDisruption(ctx, scheduler, occupiedRooms, 0.0) - require.NoError(t, err) - - pdb, err = client.PolicyV1().PodDisruptionBudgets(scheduler.Name).Get(ctx, scheduler.Name, metav1.GetOptions{}) - require.NoError(t, err) - require.NotNil(t, pdb) - require.Equal(t, pdb.Name, scheduler.Name) - - incSafetyPercentage := 1.0 + DefaultDisruptionSafetyPercentage - newRoomAmount := int32(float64(occupiedRooms) * incSafetyPercentage) - require.Equal(t, pdb.Spec.MinAvailable.IntVal, newRoomAmount) - }) - - t.Run("should default safety percentage if invalid value", func(t *testing.T) { - if !kubernetesRuntime.isPDBSupported() { - t.Log("Kubernetes version does not support PDB, skipping") - t.SkipNow() - } - - scheduler := &entities.Scheduler{ - Name: "scheduler-pdb-mitigation-no-update", - Autoscaling: &autoscaling.Autoscaling{ - Enabled: true, - Min: 100, - Max: 200, - Policy: autoscaling.Policy{ - Type: autoscaling.RoomOccupancy, - Parameters: autoscaling.PolicyParameters{ - RoomOccupancy: &autoscaling.RoomOccupancyParams{ - ReadyTarget: 0.1, - }, - }, - }, - }, - } - err := kubernetesRuntime.CreateScheduler(ctx, scheduler) - if err != nil { - require.ErrorIs(t, errors.ErrAlreadyExists, err) - } - - defer func() { - err := kubernetesRuntime.DeleteScheduler(ctx, scheduler) - if err != nil { - require.ErrorIs(t, errors.ErrNotFound, err) - } - }() - - time.Sleep(time.Millisecond * 100) - pdb, err := client.PolicyV1().PodDisruptionBudgets(scheduler.Name).Get(ctx, scheduler.Name, metav1.GetOptions{}) - require.NoError(t, err) - require.NotNil(t, pdb) - require.Equal(t, pdb.Name, scheduler.Name) - require.Equal(t, pdb.Spec.MinAvailable.IntVal, int32(0)) - - newValue := 100 - err = 
kubernetesRuntime.MitigateDisruption(ctx, scheduler, newValue, 0.0) - require.NoError(t, err) - - time.Sleep(time.Millisecond * 100) - pdb, err = client.PolicyV1().PodDisruptionBudgets(scheduler.Name).Get(ctx, scheduler.Name, metav1.GetOptions{}) - require.NoError(t, err) - require.NotNil(t, pdb) - require.Equal(t, pdb.Name, scheduler.Name) - - incSafetyPercentage := 1.0 + DefaultDisruptionSafetyPercentage - require.Equal(t, int32(float64(newValue)*incSafetyPercentage), pdb.Spec.MinAvailable.IntVal) - }) - - t.Run("should clear maxUnavailable and set minAvailable if existing PDB uses maxUnavailable", func(t *testing.T) { - if !kubernetesRuntime.isPDBSupported() { - t.Log("Kubernetes version does not support PDB, skipping") - t.SkipNow() - } - scheduler := &entities.Scheduler{ - Name: "scheduler-pdb-max-unavailable", - } - pdbSpec := &v1Policy.PodDisruptionBudget{ - ObjectMeta: metav1.ObjectMeta{ - Name: scheduler.Name, - Labels: map[string]string{ - "app.kubernetes.io/managed-by": "maestro", - }, - }, - Spec: v1Policy.PodDisruptionBudgetSpec{ - MaxUnavailable: &intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(10), - }, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "maestro-scheduler": scheduler.Name, - }, - }, - }, - } - namespace := &v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: scheduler.Name, - }, - } - - _, err := client.CoreV1().Namespaces().Create(ctx, namespace, metav1.CreateOptions{}) - require.NoError(t, err) - - pdb, err := client.PolicyV1().PodDisruptionBudgets(scheduler.Name).Create(ctx, pdbSpec, metav1.CreateOptions{}) - require.NoError(t, err) - require.NotNil(t, pdb) - require.Equal(t, pdb.Spec.MaxUnavailable.IntVal, int32(10)) - - occupiedRooms := 100 - err = kubernetesRuntime.MitigateDisruption(ctx, scheduler, occupiedRooms, 0.0) - require.NoError(t, err) - - pdb, err = client.PolicyV1().PodDisruptionBudgets(scheduler.Name).Get(ctx, scheduler.Name, metav1.GetOptions{}) - require.NoError(t, err) - require.NotNil(t, pdb) - require.Equal(t, pdb.Name, scheduler.Name) - - incSafetyPercentage := 1.0 + DefaultDisruptionSafetyPercentage - newRoomAmount := int32(float64(occupiedRooms) * incSafetyPercentage) - require.Equal(t, pdb.Spec.MinAvailable.IntVal, newRoomAmount) - require.Nil(t, pdb.Spec.MaxUnavailable) - - }) -} diff --git a/internal/adapters/storage/postgres/scheduler/db_scheduler.go b/internal/adapters/storage/postgres/scheduler/db_scheduler.go index 6b5c30811..8322d8b83 100644 --- a/internal/adapters/storage/postgres/scheduler/db_scheduler.go +++ b/internal/adapters/storage/postgres/scheduler/db_scheduler.go @@ -57,6 +57,7 @@ type schedulerInfo struct { TerminationGracePeriod time.Duration Toleration string Affinity string + PdbMaxUnavailable string Containers []game_room.Container PortRange *port.PortRange MaxSurge string @@ -78,6 +79,7 @@ func NewDBScheduler(scheduler *entities.Scheduler) *Scheduler { MaxSurge: scheduler.MaxSurge, RoomsReplicas: scheduler.RoomsReplicas, Forwarders: scheduler.Forwarders, + PdbMaxUnavailable: scheduler.PdbMaxUnavailable, Autoscaling: scheduler.Autoscaling, Annotations: scheduler.Annotations, Labels: scheduler.Labels, @@ -113,13 +115,14 @@ func (s *Scheduler) ToScheduler() (*entities.Scheduler, error) { Affinity: info.Affinity, Containers: info.Containers, }, - PortRange: info.PortRange, - RollbackVersion: s.RollbackVersion, - CreatedAt: s.CreatedAt.Time, - LastDownscaleAt: info.LastDownscaleAt, - MaxSurge: info.MaxSurge, - RoomsReplicas: info.RoomsReplicas, - Forwarders: info.Forwarders, - 
Autoscaling: info.Autoscaling, + PortRange: info.PortRange, + RollbackVersion: s.RollbackVersion, + CreatedAt: s.CreatedAt.Time, + LastDownscaleAt: info.LastDownscaleAt, + MaxSurge: info.MaxSurge, + RoomsReplicas: info.RoomsReplicas, + Forwarders: info.Forwarders, + PdbMaxUnavailable: info.PdbMaxUnavailable, + Autoscaling: info.Autoscaling, }, nil } diff --git a/internal/api/handlers/requestadapters/schedulers.go b/internal/api/handlers/requestadapters/schedulers.go index e5f20761b..40496d2ca 100644 --- a/internal/api/handlers/requestadapters/schedulers.go +++ b/internal/api/handlers/requestadapters/schedulers.go @@ -72,6 +72,10 @@ func FromApiPatchSchedulerRequestToChangeMap(request *api.PatchSchedulerRequest) patchMap[patch.LabelSchedulerForwarders] = fromApiForwarders(request.GetForwarders()) } + if request.PdbMaxUnavailable != "" { + patchMap[patch.LabelPDBMaxUnavailable] = request.GetPdbMaxUnavailable() + } + if request.Annotations != nil { patchMap[patch.LabelAnnotations] = request.GetAnnotations() } @@ -126,6 +130,7 @@ func FromApiCreateSchedulerRequestToEntity(request *api.CreateSchedulerRequest) int(request.GetRoomsReplicas()), schedulerAutoscaling, fromApiForwarders(request.GetForwarders()), + request.GetPdbMaxUnavailable(), request.GetAnnotations(), request.GetLabels(), ) @@ -163,6 +168,7 @@ func FromApiNewSchedulerVersionRequestToEntity(request *api.NewSchedulerVersionR int(request.GetRoomsReplicas()), schedulerAutoscaling, fromApiForwarders(request.GetForwarders()), + request.GetPdbMaxUnavailable(), request.GetAnnotations(), request.GetLabels(), ) @@ -176,18 +182,19 @@ func FromEntitySchedulerToResponse(entity *entities.Scheduler) (*api.Scheduler, } return &api.Scheduler{ - Name: entity.Name, - Game: entity.Game, - State: entity.State, - PortRange: getPortRange(entity.PortRange), - CreatedAt: timestamppb.New(entity.CreatedAt), - MaxSurge: entity.MaxSurge, - RoomsReplicas: int32(entity.RoomsReplicas), - Spec: getSpec(entity.Spec), - Autoscaling: getAutoscaling(entity.Autoscaling), - Forwarders: forwarders, - Annotations: entity.Annotations, - Labels: entity.Labels, + Name: entity.Name, + Game: entity.Game, + State: entity.State, + PortRange: getPortRange(entity.PortRange), + CreatedAt: timestamppb.New(entity.CreatedAt), + MaxSurge: entity.MaxSurge, + RoomsReplicas: int32(entity.RoomsReplicas), + Spec: getSpec(entity.Spec), + Autoscaling: getAutoscaling(entity.Autoscaling), + Forwarders: forwarders, + PdbMaxUnavailable: entity.PdbMaxUnavailable, + Annotations: entity.Annotations, + Labels: entity.Labels, }, nil } diff --git a/internal/api/handlers/requestadapters/schedulers_test.go b/internal/api/handlers/requestadapters/schedulers_test.go index 8720734ff..a823a8a57 100644 --- a/internal/api/handlers/requestadapters/schedulers_test.go +++ b/internal/api/handlers/requestadapters/schedulers_test.go @@ -330,6 +330,7 @@ func TestFromApiPatchSchedulerRequestToChangeMap(t *testing.T) { }, }, }, + { Title: "only autoscaling should convert api.PatchSchedulerRequest to change map", Input: Input{ @@ -573,8 +574,9 @@ func TestFromApiCreateSchedulerRequestToEntity(t *testing.T) { }, }, }, - Annotations: map[string]string{}, - Labels: map[string]string{}, + PdbMaxUnavailable: "42%", + Annotations: map[string]string{}, + Labels: map[string]string{}, }, }, Output: Output{ @@ -674,8 +676,9 @@ func TestFromApiCreateSchedulerRequestToEntity(t *testing.T) { }, }, }, - Annotations: map[string]string{}, - Labels: map[string]string{}, + PdbMaxUnavailable: "42%", + Annotations: map[string]string{}, + 
Labels: map[string]string{}, }, }, }, @@ -1228,8 +1231,9 @@ func TestFromApiNewSchedulerVersionRequestToEntity(t *testing.T) { }, }, }, - Annotations: map[string]string{}, - Labels: map[string]string{}, + PdbMaxUnavailable: "12", + Annotations: map[string]string{}, + Labels: map[string]string{}, }, }, Output: Output{ @@ -1329,8 +1333,9 @@ func TestFromApiNewSchedulerVersionRequestToEntity(t *testing.T) { }, }, }, - Annotations: map[string]string{}, - Labels: map[string]string{}, + PdbMaxUnavailable: "12", + Annotations: map[string]string{}, + Labels: map[string]string{}, }, }, }, @@ -1463,8 +1468,9 @@ func TestFromEntitySchedulerToResponse(t *testing.T) { }, }, }, - Annotations: map[string]string{}, - Labels: map[string]string{}, + PdbMaxUnavailable: "10%", + Annotations: map[string]string{}, + Labels: map[string]string{}, }, }, Output: Output{ @@ -1563,8 +1569,9 @@ func TestFromEntitySchedulerToResponse(t *testing.T) { }, }, }, - Annotations: map[string]string{}, - Labels: map[string]string{}, + PdbMaxUnavailable: "10%", + Annotations: map[string]string{}, + Labels: map[string]string{}, }, }, }, @@ -1644,8 +1651,9 @@ func TestFromEntitySchedulerToResponse(t *testing.T) { }, }, }, - Annotations: map[string]string{}, - Labels: map[string]string{}, + PdbMaxUnavailable: "", + Annotations: map[string]string{}, + Labels: map[string]string{}, }, }, Output: Output{ @@ -1730,8 +1738,9 @@ func TestFromEntitySchedulerToResponse(t *testing.T) { }, }, }, - Annotations: map[string]string{}, - Labels: map[string]string{}, + PdbMaxUnavailable: "", + Annotations: map[string]string{}, + Labels: map[string]string{}, }, }, }, diff --git a/internal/api/handlers/schedulers_handler_test.go b/internal/api/handlers/schedulers_handler_test.go index 4bc50f0b1..e407d53db 100644 --- a/internal/api/handlers/schedulers_handler_test.go +++ b/internal/api/handlers/schedulers_handler_test.go @@ -69,7 +69,7 @@ func TestListSchedulers(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().GetSchedulersWithFilter(gomock.Any(), &filters.SchedulerFilter{Name: schedulerName, Game: game, Version: version}).Return([]*entities.Scheduler{ { @@ -115,7 +115,7 @@ func TestListSchedulers(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().GetSchedulersWithFilter(gomock.Any(), gomock.Any()).Return([]*entities.Scheduler{}, nil) @@ -146,7 +146,7 @@ func TestListSchedulers(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().GetSchedulersWithFilter(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("GetSchedulersWithFilter error")) @@ -203,7 +203,7 @@ func TestGetScheduler(t 
*testing.T) { mockCtrl := gomock.NewController(t) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(nil, schedulerCache, nil, nil) + schedulerManager := schedulers.NewSchedulerManager(nil, schedulerCache, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) scheduler := &entities.Scheduler{ Name: "zooba-us", @@ -272,8 +272,9 @@ func TestGetScheduler(t *testing.T) { }, }, }, - Annotations: map[string]string{}, - Labels: map[string]string{}, + PdbMaxUnavailable: "10%", + Annotations: map[string]string{}, + Labels: map[string]string{}, } schedulerCache.EXPECT().GetScheduler(gomock.Any(), gomock.Any()).Return(scheduler, nil) @@ -303,7 +304,7 @@ func TestGetScheduler(t *testing.T) { schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, nil, nil) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerCache.EXPECT().GetScheduler(gomock.Any(), gomock.Any()).Return(nil, errors.NewErrNotFound("scheduler NonExistentSchedule not found")) schedulerStorage.EXPECT().GetScheduler(gomock.Any(), gomock.Any()).Return(nil, errors.NewErrNotFound("scheduler NonExistentSchedule not found")) @@ -336,7 +337,7 @@ func TestGetScheduler(t *testing.T) { schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, nil, nil) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerCache.EXPECT().GetScheduler(gomock.Any(), gomock.Any()).Return(nil, errors.NewErrInvalidArgument("Error")) schedulerStorage.EXPECT().GetScheduler(gomock.Any(), gomock.Any()).Return(nil, errors.NewErrInvalidArgument("Error")) @@ -371,7 +372,7 @@ func TestGetSchedulerVersions(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) createdAtV1, _ := time.Parse(time.RFC3339Nano, "2020-01-01T00:00:00.001Z") createdAtV2, _ := time.Parse(time.RFC3339Nano, "2020-01-01T00:00:00.001Z") @@ -412,7 +413,7 @@ func TestGetSchedulerVersions(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().GetSchedulerVersions(gomock.Any(), gomock.Any()).Return(nil, errors.NewErrNotFound("scheduler NonExistentScheduler not found")) @@ -438,7 +439,7 @@ func TestGetSchedulerVersions(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil) + schedulerManager := 
schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().GetSchedulerVersions(gomock.Any(), gomock.Any()).Return(nil, errors.NewErrInvalidArgument("Error")) @@ -475,7 +476,7 @@ func TestCreateScheduler(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) scheduler := &entities.Scheduler{ Name: "scheduler-name-1", @@ -542,8 +543,9 @@ func TestCreateScheduler(t *testing.T) { }, }, }, - Annotations: map[string]string{"imageregistry": "https://docker.hub.com/"}, - Labels: map[string]string{"scheduler": "scheduler-name"}, + PdbMaxUnavailable: "10%", + Annotations: map[string]string{"imageregistry": "https://docker.hub.com/"}, + Labels: map[string]string{"scheduler": "scheduler-name"}, } schedulerStorage.EXPECT().CreateScheduler(gomock.Any(), gomock.Any()).Do( @@ -588,7 +590,7 @@ func TestCreateScheduler(t *testing.T) { }) t.Run("with failure", func(t *testing.T) { - schedulerManager := schedulers.NewSchedulerManager(nil, nil, nil, nil) + schedulerManager := schedulers.NewSchedulerManager(nil, nil, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) mux := runtime.NewServeMux() err := api.RegisterSchedulersServiceHandlerServer(context.Background(), mux, ProvideSchedulersHandler(schedulerManager)) @@ -634,7 +636,7 @@ func TestCreateScheduler(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, roomStorage) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, roomStorage, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().CreateScheduler(gomock.Any(), gomock.Any()).Return(errors.NewErrAlreadyExists("error creating scheduler %s: name already exists", "scheduler")) @@ -680,7 +682,7 @@ func TestNewSchedulerVersion(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) operationManager.EXPECT().CreateOperation(gomock.Any(), "scheduler-name-1", gomock.Any()).Return(&operation.Operation{ID: "id-1"}, nil) schedulerStorage.EXPECT().GetScheduler(gomock.Any(), "scheduler-name-1").Return(currentScheduler, nil) @@ -711,7 +713,7 @@ func TestNewSchedulerVersion(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, roomStorage) + schedulerManager := 
schedulers.NewSchedulerManager(schedulerStorage, nil, nil, roomStorage, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().GetScheduler(gomock.Any(), "scheduler-name-1").Return(nil, errors.NewErrNotFound("err")) @@ -744,7 +746,7 @@ func TestNewSchedulerVersion(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) operationManager.EXPECT().CreateOperation(gomock.Any(), "scheduler-name-1", gomock.Any()).Return(nil, errors.NewErrUnexpected("storage offline")) schedulerStorage.EXPECT().GetScheduler(gomock.Any(), "scheduler-name-1").Return(currentScheduler, nil) @@ -780,7 +782,7 @@ func TestSwitchActiveVersion(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) operationManager.EXPECT().CreateOperation(gomock.Any(), "scheduler-name-1", gomock.Any()).Return(&operation.Operation{ID: "id-1"}, nil) @@ -809,7 +811,7 @@ func TestSwitchActiveVersion(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) operationManager.EXPECT().CreateOperation(gomock.Any(), "scheduler-name-1", gomock.Any()).Return(nil, errors.NewErrUnexpected("internal error")) @@ -834,7 +836,7 @@ func TestGetSchedulersInfo(t *testing.T) { schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, roomStorage) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, roomStorage, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) scheduler := newValidScheduler() scheduler.Autoscaling = &autoscaling.Autoscaling{ @@ -871,7 +873,7 @@ func TestGetSchedulersInfo(t *testing.T) { t.Run("with valid request and no scheduler and game rooms found", func(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().GetSchedulersWithFilter(gomock.Any(), gomock.Any()).Return(nil, errors.NewErrNotFound("err")) mux := 
runtime.NewServeMux() @@ -895,7 +897,7 @@ func TestGetSchedulersInfo(t *testing.T) { t.Run("with unknown error", func(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().GetSchedulersWithFilter(gomock.Any(), gomock.Any()).Return(nil, errors.NewErrUnexpected("exception")) mux := runtime.NewServeMux() @@ -1092,7 +1094,7 @@ func TestPatchScheduler(t *testing.T) { schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) operationManager := mock.NewMockOperationManager(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, operationManager, nil) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, operationManager, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT(). GetScheduler(gomock.Any(), "scheduler-name-1"). diff --git a/internal/core/entities/pdb/pdb.go b/internal/core/entities/pdb/pdb.go new file mode 100644 index 000000000..98c5d39e4 --- /dev/null +++ b/internal/core/entities/pdb/pdb.go @@ -0,0 +1,53 @@ +// MIT License +// +// Copyright (c) 2021 TFG Co +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package pdb + +import ( + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/util/intstr" +) + +const DefaultPdbMaxUnavailablePercentage = "5%" + +func ConvertStrToSpec(val string) *intstr.IntOrString { + if val == "" { + return nil + } + if strings.HasSuffix(val, "%") { + return &intstr.IntOrString{ + Type: intstr.String, + StrVal: val, + } + } else { + intVal, err := strconv.Atoi(val) + if err != nil { + return nil + } + return &intstr.IntOrString{ + Type: intstr.Int, + IntVal: int32(intVal), + } + } +} diff --git a/internal/core/entities/scheduler.go b/internal/core/entities/scheduler.go index 465ac22dc..0f1126ab1 100644 --- a/internal/core/entities/scheduler.go +++ b/internal/core/entities/scheduler.go @@ -60,20 +60,21 @@ var ( // Scheduler represents one of the basic maestro structs. // It holds GameRooms specifications, as well as optional events forwarders. 
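The `ConvertStrToSpec` helper added above in `internal/core/entities/pdb/pdb.go` is what turns the scheduler's `pdbMaxUnavailable` string into the Kubernetes `IntOrString` value placed in the PDB spec. A minimal, self-contained sketch of its contract follows; the helper is re-declared here only because maestro's `internal/` packages cannot be imported from outside the module:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// convertStrToSpec mirrors pdb.ConvertStrToSpec from the diff above:
// "" yields nil, a "%"-suffixed value yields a string-typed IntOrString,
// a plain integer yields an int-typed one, and unparsable input yields nil.
func convertStrToSpec(val string) *intstr.IntOrString {
	if val == "" {
		return nil
	}
	if strings.HasSuffix(val, "%") {
		return &intstr.IntOrString{Type: intstr.String, StrVal: val}
	}
	intVal, err := strconv.Atoi(val)
	if err != nil {
		return nil
	}
	return &intstr.IntOrString{Type: intstr.Int, IntVal: int32(intVal)}
}

func main() {
	pct := convertStrToSpec("5%")
	fmt.Println(pct.Type == intstr.String, pct.StrVal) // true 5%

	abs := convertStrToSpec("100")
	fmt.Println(abs.Type == intstr.Int, abs.IntVal) // true 100

	// Empty or malformed values produce nil, leaving MaxUnavailable unset.
	fmt.Println(convertStrToSpec("") == nil, convertStrToSpec("abc") == nil) // true true
}
```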
type Scheduler struct { - Name string `validate:"required,kube_resource_name"` - Game string `validate:"required"` - State string `validate:"required"` - RollbackVersion string - Spec game_room.Spec - Autoscaling *autoscaling.Autoscaling - PortRange *port.PortRange - RoomsReplicas int `validate:"min=0"` - CreatedAt time.Time - LastDownscaleAt time.Time - MaxSurge string `validate:"required,max_surge"` - Forwarders []*forwarder.Forwarder `validate:"dive"` - Annotations map[string]string - Labels map[string]string + Name string `validate:"required,kube_resource_name"` + Game string `validate:"required"` + State string `validate:"required"` + RollbackVersion string + Spec game_room.Spec + Autoscaling *autoscaling.Autoscaling + PortRange *port.PortRange + RoomsReplicas int `validate:"min=0"` + CreatedAt time.Time + LastDownscaleAt time.Time + MaxSurge string `validate:"required,max_surge"` + Forwarders []*forwarder.Forwarder `validate:"dive"` + PdbMaxUnavailable string `validate:"pdb_max_unavailable"` + Annotations map[string]string + Labels map[string]string } // NewScheduler instantiate a new scheduler struct. @@ -87,21 +88,23 @@ func NewScheduler( roomsReplicas int, autoscaling *autoscaling.Autoscaling, forwarders []*forwarder.Forwarder, + pdbMaxUnavailable string, annotations map[string]string, labels map[string]string, ) (*Scheduler, error) { scheduler := &Scheduler{ - Name: name, - Game: game, - State: state, - Spec: spec, - PortRange: portRange, - MaxSurge: maxSurge, - RoomsReplicas: roomsReplicas, - Autoscaling: autoscaling, - Forwarders: forwarders, - Annotations: annotations, - Labels: labels, + Name: name, + Game: game, + State: state, + Spec: spec, + PortRange: portRange, + MaxSurge: maxSurge, + RoomsReplicas: roomsReplicas, + Autoscaling: autoscaling, + Forwarders: forwarders, + PdbMaxUnavailable: pdbMaxUnavailable, + Annotations: annotations, + Labels: labels, } return scheduler, scheduler.Validate() } @@ -159,6 +162,7 @@ func (s *Scheduler) IsMajorVersion(newScheduler *Scheduler) bool { "MaxSurge", "RoomsReplicas", "Autoscaling", + "PdbMaxUnavailable", ), ) } diff --git a/internal/core/entities/scheduler_test.go b/internal/core/entities/scheduler_test.go index 6ae1b4e67..e0db0160d 100644 --- a/internal/core/entities/scheduler_test.go +++ b/internal/core/entities/scheduler_test.go @@ -93,6 +93,7 @@ func TestNewScheduler(t *testing.T) { forwarders := []*forwarder.Forwarder{fwd} annotations := map[string]string{"imageregistry": "https://hub.docker.com/"} labels := map[string]string{"scheduler": "scheduler-name"} + pdbMaxUnavailable := "5%" t.Run("with success when create valid scheduler", func(t *testing.T) { scheduler, err := entities.NewScheduler( @@ -105,21 +106,23 @@ func TestNewScheduler(t *testing.T) { roomsReplicas, nil, forwarders, + pdbMaxUnavailable, annotations, labels) expectedScheduler := &entities.Scheduler{ - Name: name, - Game: game, - MaxSurge: maxSurge, - State: entities.StateCreating, - Spec: spec, - PortRange: portRange, - RoomsReplicas: roomsReplicas, - Autoscaling: nil, - Forwarders: forwarders, - Annotations: annotations, - Labels: labels, + Name: name, + Game: game, + MaxSurge: maxSurge, + State: entities.StateCreating, + Spec: spec, + PortRange: portRange, + RoomsReplicas: roomsReplicas, + Autoscaling: nil, + Forwarders: forwarders, + PdbMaxUnavailable: pdbMaxUnavailable, + Annotations: annotations, + Labels: labels, } require.NoError(t, err) @@ -136,7 +139,10 @@ func TestNewScheduler(t *testing.T) { portRange, roomsReplicas, nil, - forwarders, annotations, 
labels) + forwarders, + pdbMaxUnavailable, + annotations, + labels) require.Error(t, err) }) @@ -160,7 +166,10 @@ func TestNewScheduler(t *testing.T) { ), 0, nil, - forwarders, annotations, labels) + forwarders, + pdbMaxUnavailable, + annotations, + labels) require.Error(t, err) }) @@ -184,7 +193,10 @@ func TestNewScheduler(t *testing.T) { ), -1, nil, - forwarders, annotations, labels) + forwarders, + pdbMaxUnavailable, + annotations, + labels) require.Error(t, err) }) diff --git a/internal/core/operations/providers/operation_providers.go b/internal/core/operations/providers/operation_providers.go index bda7ce910..26aec3d74 100644 --- a/internal/core/operations/providers/operation_providers.go +++ b/internal/core/operations/providers/operation_providers.go @@ -96,7 +96,7 @@ func ProvideExecutors( executors[removerooms.OperationName] = removerooms.NewExecutor(roomManager, roomStorage, operationManager, schedulerManager) executors[test.OperationName] = test.NewExecutor() executors[switchversion.OperationName] = switchversion.NewExecutor(schedulerManager, operationManager) - executors[newversion.OperationName] = newversion.NewExecutor(roomManager, schedulerManager, operationManager, newSchedulerVersionConfig) + executors[newversion.OperationName] = newversion.NewExecutor(runtime, roomManager, schedulerManager, operationManager, newSchedulerVersionConfig) executors[healthcontroller.OperationName] = healthcontroller.NewExecutor(roomStorage, roomManager, instanceStorage, schedulerStorage, operationManager, autoscaler, healthControllerConfig) executors[storagecleanup.OperationName] = storagecleanup.NewExecutor(operationStorage) executors[deletescheduler.OperationName] = deletescheduler.NewExecutor(schedulerStorage, schedulerCache, instanceStorage, operationStorage, operationManager, runtime) diff --git a/internal/core/operations/schedulers/create/executor.go b/internal/core/operations/schedulers/create/executor.go index c5584bb05..e3c4e34a5 100644 --- a/internal/core/operations/schedulers/create/executor.go +++ b/internal/core/operations/schedulers/create/executor.go @@ -62,8 +62,7 @@ func (e *Executor) Execute(ctx context.Context, op *operation.Operation, definit if !ok { return fmt.Errorf("invalid operation definition for %s operation", e.Name()) } - - err := e.runtime.CreateScheduler(ctx, &entities.Scheduler{Name: op.SchedulerName}) + err := e.runtime.CreateScheduler(ctx, &entities.Scheduler{Name: op.SchedulerName, PdbMaxUnavailable: opDef.NewScheduler.PdbMaxUnavailable}) if err != nil { logger.Error("error creating scheduler in runtime", zap.Error(err)) createSchedulerErr := fmt.Errorf("error creating scheduler in runtime: %w", err) diff --git a/internal/core/operations/schedulers/newversion/executor.go b/internal/core/operations/schedulers/newversion/executor.go index e2f78f6f4..9838330e2 100644 --- a/internal/core/operations/schedulers/newversion/executor.go +++ b/internal/core/operations/schedulers/newversion/executor.go @@ -50,6 +50,7 @@ type Config struct { // Executor holds the dependecies to execute the operation to create a new scheduler version. type Executor struct { + runtime ports.Runtime roomManager ports.RoomManager schedulerManager ports.SchedulerManager operationManager ports.OperationManager @@ -60,8 +61,9 @@ type Executor struct { var _ operations.Executor = (*Executor)(nil) // NewExecutor instantiate a new scheduler version executor. 
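The hunk below wires a `ports.Runtime` into the new-version executor so the PDB can be reconciled when a new version changes `pdbMaxUnavailable`. A condensed sketch of that reconciliation step, with the surrounding types reduced to hypothetical stand-ins (only the `UpdateScheduler` call and the warn-only error handling come from the diff):

```go
package main

import (
	"context"
	"fmt"
)

// Hypothetical stand-ins; the real types live in maestro's entities and ports packages.
type Scheduler struct {
	Name              string
	PdbMaxUnavailable string
}

type Runtime interface {
	UpdateScheduler(ctx context.Context, scheduler *Scheduler) error
}

// reconcilePDB captures the step added to Execute below: when the new version
// changes the disruption budget, push it to the runtime, but only warn on
// failure -- the version rollout itself is not aborted.
func reconcilePDB(ctx context.Context, rt Runtime, current, next *Scheduler) {
	if next.PdbMaxUnavailable == current.PdbMaxUnavailable {
		return
	}
	if err := rt.UpdateScheduler(ctx, next); err != nil {
		fmt.Println("warn: error updating scheduler PDB:", err) // logger.Warn in the real executor
	}
}

type fakeRuntime struct{}

func (fakeRuntime) UpdateScheduler(_ context.Context, s *Scheduler) error {
	fmt.Printf("PDB for %s updated to %s\n", s.Name, s.PdbMaxUnavailable)
	return nil
}

func main() {
	current := &Scheduler{Name: "match", PdbMaxUnavailable: "5%"}
	next := &Scheduler{Name: "match", PdbMaxUnavailable: "10%"}
	reconcilePDB(context.Background(), fakeRuntime{}, current, next)
}
```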
-func NewExecutor(roomManager ports.RoomManager, schedulerManager ports.SchedulerManager, operationManager ports.OperationManager, config Config) *Executor { +func NewExecutor(runtime ports.Runtime, roomManager ports.RoomManager, schedulerManager ports.SchedulerManager, operationManager ports.OperationManager, config Config) *Executor { return &Executor{ + runtime: runtime, roomManager: roomManager, schedulerManager: schedulerManager, operationManager: operationManager, @@ -124,6 +126,13 @@ func (ex *Executor) Execute(ctx context.Context, op *operation.Operation, defini return err } + if newScheduler.PdbMaxUnavailable != currentActiveScheduler.PdbMaxUnavailable { + err = ex.runtime.UpdateScheduler(ctx, newScheduler) + if err != nil { + logger.Warn("error updating scheduler PDB", zap.Error(err)) + } + } + ex.operationManager.AppendOperationEventToExecutionHistory(ctx, op, fmt.Sprintf(enqueuedSwitchVersionMessageTemplate, switchOpID)) logger.Sugar().Infof("new scheduler version created: %s, is major: %t", newScheduler.Spec.Version, isSchedulerMajorVersion) logger.Sugar().Infof("%s operation succeded, %s operation enqueued to continue scheduler update process, switching to version %s", opDef.Name(), switchversion.OperationName, newScheduler.Spec.Version) diff --git a/internal/core/operations/schedulers/newversion/executor_test.go b/internal/core/operations/schedulers/newversion/executor_test.go index abb9ea587..93417a428 100644 --- a/internal/core/operations/schedulers/newversion/executor_test.go +++ b/internal/core/operations/schedulers/newversion/executor_test.go @@ -69,13 +69,14 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) switchOpID := "switch-active-version-op-id" config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) schedulerVersions := []*entities.SchedulerVersion{{Version: "v1.0.0"}, {Version: "v1.1.0"}, {Version: "v1.2.0"}} gameRoom := &game_room.GameRoom{ID: "id-1"} @@ -120,13 +121,14 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) switchOpID := "switch-active-version-op-id" config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 3, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) schedulerVersions := []*entities.SchedulerVersion{{Version: "v1.0.0"}, {Version: "v1.1.0"}, {Version: "v1.2.0"}} gameRoom := &game_room.GameRoom{ID: "id-1"} @@ -174,13 +176,14 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) switchOpID := "switch-active-version-op-id" config := 
newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) schedulerVersions := []*entities.SchedulerVersion{{Version: "v2.0.0"}, {Version: "v3.1.0"}, {Version: "v1.2.0"}} gameRoom := &game_room.GameRoom{ID: "id-1"} @@ -225,13 +228,14 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) switchOpID := "switch-active-version-op-id" config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) schedulerVersions := []*entities.SchedulerVersion{{Version: "v1.1.0"}, {Version: "v1.2.0"}, {Version: "v1.3.0"}} gameRoom := &game_room.GameRoom{ID: "id-1"} @@ -276,13 +280,14 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) switchOpID := "switch-active-version-op-id" config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) schedulerVersions := []*entities.SchedulerVersion{{Version: "v1.1.0"}, {Version: "v1.2.0"}, {Version: "v1.3.0"}} gameRoom := &game_room.GameRoom{ID: "id-1"} @@ -326,12 +331,13 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) schedulerManager.EXPECT().GetActiveScheduler(gomock.Any(), newScheduler.Name).Return(currentActiveScheduler, nil) schedulerManager.EXPECT().GetSchedulerVersions(gomock.Any(), newScheduler.Name).Return([]*entities.SchedulerVersion{}, errors.NewErrUnexpected("some_error")) @@ -358,13 +364,14 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) schedulerVersions := []*entities.SchedulerVersion{{Version: "v-----"}} config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, 
roomManager, schedulerManager, operationsManager, config) schedulerManager.EXPECT().GetActiveScheduler(gomock.Any(), newScheduler.Name).Return(currentActiveScheduler, nil) schedulerManager.EXPECT().GetSchedulerVersions(gomock.Any(), newScheduler.Name).Return(schedulerVersions, nil) @@ -391,12 +398,13 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) schedulerVersions := []*entities.SchedulerVersion{{Version: "v2.0.0"}, {Version: "v3.1.0"}, {Version: "v1.2.0"}} newSchedulerWithNewVersion := newScheduler @@ -433,12 +441,13 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 3, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) schedulerVersions := []*entities.SchedulerVersion{{Version: "v2.0.0"}, {Version: "v3.1.0"}, {Version: "v1.2.0"}} newSchedulerWithNewVersion := newScheduler @@ -480,13 +489,14 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 3, } ctx, cancelFn := context.WithCancel(context.Background()) - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) schedulerVersions := []*entities.SchedulerVersion{{Version: "v1.0.0"}, {Version: "v1.1.0"}, {Version: "v1.2.0"}} @@ -522,12 +532,13 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) schedulerVersions := []*entities.SchedulerVersion{{Version: "v2.0.0"}, {Version: "v3.1.0"}, {Version: "v1.2.0"}} newSchedulerWithNewVersion := newScheduler @@ -566,12 +577,13 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := 
mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) schedulerVersions := []*entities.SchedulerVersion{{Version: "v2.0.0"}, {Version: "v3.1.0"}, {Version: "v1.2.0"}} newSchedulerWithNewVersion := newScheduler @@ -617,12 +629,13 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) schedulerVersions := []*entities.SchedulerVersion{{Version: "v2.0.0"}, {Version: "v3.1.0"}, {Version: "v1.2.0"}} newSchedulerWithNewVersion := newScheduler @@ -663,13 +676,14 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) switchOpID := "switch-active-version-op-id" config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) schedulerVersions := []*entities.SchedulerVersion{{Version: "v2.0.0"}, {Version: "v3.1.0"}, {Version: "v4.2.0"}} @@ -707,13 +721,14 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) switchOpID := "switch-active-version-op-id" config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) schedulerVersions := []*entities.SchedulerVersion{{Version: "v2.0.0"}, {Version: "v1.3.0"}, {Version: "v1.5.0"}} @@ -751,13 +766,14 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) switchOpID := "switch-active-version-op-id" config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) schedulerVersions := []*entities.SchedulerVersion{{Version: "v2.0.0"}, 
{Version: "v2.1.0"}, {Version: "v3.5.0"}} @@ -794,12 +810,13 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) schedulerManager.EXPECT().GetActiveScheduler(gomock.Any(), newScheduler.Name).Return(currentActiveScheduler, nil) schedulerManager.EXPECT().GetSchedulerVersions(gomock.Any(), newScheduler.Name).Return([]*entities.SchedulerVersion{}, errors.NewErrUnexpected("some_error")) @@ -825,13 +842,14 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) schedulerVersions := []*entities.SchedulerVersion{{Version: "v-----"}} config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) schedulerManager.EXPECT().GetActiveScheduler(gomock.Any(), newScheduler.Name).Return(currentActiveScheduler, nil) schedulerManager.EXPECT().GetSchedulerVersions(gomock.Any(), newScheduler.Name).Return(schedulerVersions, nil) @@ -857,12 +875,13 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) newSchedulerWithNewVersion := newScheduler newSchedulerWithNewVersion.Spec.Version = "v1.1.0" newSchedulerWithNewVersion.RollbackVersion = "v1.0.0" @@ -894,12 +913,13 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) newSchedulerWithNewVersion := newScheduler newSchedulerWithNewVersion.Spec.Version = "v1.1.0" newSchedulerWithNewVersion.RollbackVersion = "v1.0.0" @@ -925,12 +945,13 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := 
mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) newSchedulerWithNewVersion := newScheduler newSchedulerWithNewVersion.Spec.Version = "v1.1.0" newSchedulerWithNewVersion.RollbackVersion = "v1.0.0" @@ -956,12 +977,13 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) // mocks for SchedulerManager GetActiveScheduler method schedulerManager.EXPECT().GetActiveScheduler(gomock.Any(), newScheduler.Name).Return(currentActiveScheduler, nil) @@ -994,12 +1016,13 @@ func TestExecutor_Rollback(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) executor.AddValidationRoomID(newScheduler.Name, &game_room.GameRoom{ID: "room1"}) roomManager.EXPECT().DeleteRoom(gomock.Any(), gomock.Any(), remove.NewVersionRollback).Return(nil) schedulerManager.EXPECT().GetActiveScheduler(gomock.Any(), newScheduler.Name).Return(&newScheduler, nil) @@ -1023,12 +1046,13 @@ func TestExecutor_Rollback(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) executor.AddValidationRoomID(newScheduler.Name, &game_room.GameRoom{ID: "room1"}) roomManager.EXPECT().DeleteRoom(gomock.Any(), gomock.Any(), remove.NewVersionRollback).Return(errors.NewErrUnexpected("some error")) result := executor.Rollback(context.Background(), op, operationDef, nil) @@ -1050,12 +1074,13 @@ func TestExecutor_Rollback(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := 
newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) schedulerManager.EXPECT().GetActiveScheduler(gomock.Any(), newScheduler.Name).Return(&newScheduler, nil) schedulerManager.EXPECT().UpdateScheduler(gomock.Any(), &newScheduler).Return(nil) result := executor.Rollback(context.Background(), op, operationDef, nil) @@ -1063,6 +1088,84 @@ func TestExecutor_Rollback(t *testing.T) { require.Nil(t, result) }) + t.Run("should call runtime to update max unavailable if it has changed", func(t *testing.T) { + mockCtrl := gomock.NewController(t) + + currentActiveScheduler := newValidSchedulerWithImageVersion("image-v1") + currentActiveScheduler.PdbMaxUnavailable = "5%" + newScheduler := *newValidSchedulerWithImageVersion("image-v1") + newScheduler.PdbMaxUnavailable = "10%" + op := &operation.Operation{ + ID: "123", + Status: operation.StatusInProgress, + DefinitionName: newversion.OperationName, + SchedulerName: newScheduler.Name, + } + operationDef := &newversion.Definition{NewScheduler: &newScheduler} + roomManager := mockports.NewMockRoomManager(mockCtrl) + schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) + operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) + switchOpID := "switch-active-version-op-id" + config := newversion.Config{ + RoomInitializationTimeout: time.Duration(120000), + RoomValidationAttempts: 1, + } + schedulerVersions := []*entities.SchedulerVersion{{Version: "v1.0.0"}} + + executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) + + schedulerManager.EXPECT().GetActiveScheduler(gomock.Any(), newScheduler.Name).Return(currentActiveScheduler, nil) + schedulerManager.EXPECT().UpdateScheduler(gomock.Any(), currentActiveScheduler).Return(nil) + schedulerManager.EXPECT().GetSchedulerVersions(gomock.Any(), newScheduler.Name).Return(schedulerVersions, nil) + schedulerManager.EXPECT().CreateNewSchedulerVersionAndEnqueueSwitchVersion(gomock.Any(), gomock.Any()).Return(switchOpID, nil) + runtime.EXPECT().UpdateScheduler(gomock.Any(), &newScheduler).Return(nil) + operationsManager.EXPECT().AppendOperationEventToExecutionHistory(gomock.Any(), op, fmt.Sprintf("enqueued switch active version operation with id: %s", switchOpID)) + + result := executor.Execute(context.Background(), op, operationDef) + + require.Nil(t, result) + }) + + t.Run("should not call runtime to update max unavailable if it has not changed", func(t *testing.T) { + mockCtrl := gomock.NewController(t) + + currentActiveScheduler := newValidSchedulerWithImageVersion("image-v1") + currentActiveScheduler.PdbMaxUnavailable = "5%" + newScheduler := *newValidSchedulerWithImageVersion("image-v1") + newScheduler.PdbMaxUnavailable = "5%" + newScheduler.MaxSurge = "42" + op := &operation.Operation{ + ID: "123", + Status: operation.StatusInProgress, + DefinitionName: newversion.OperationName, + SchedulerName: newScheduler.Name, + } + operationDef := &newversion.Definition{NewScheduler: &newScheduler} + roomManager := mockports.NewMockRoomManager(mockCtrl) + schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) + operationsManager := mockports.NewMockOperationManager(mockCtrl) + runtime := mockports.NewMockRuntime(mockCtrl) + switchOpID := "switch-active-version-op-id" + config := newversion.Config{ + RoomInitializationTimeout: time.Duration(120000), + RoomValidationAttempts: 1, + 
}
+		schedulerVersions := []*entities.SchedulerVersion{{Version: "v1.0.0"}}
+
+		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+
+		schedulerManager.EXPECT().GetActiveScheduler(gomock.Any(), newScheduler.Name).Return(currentActiveScheduler, nil)
+		schedulerManager.EXPECT().UpdateScheduler(gomock.Any(), currentActiveScheduler).Return(nil)
+		schedulerManager.EXPECT().GetSchedulerVersions(gomock.Any(), newScheduler.Name).Return(schedulerVersions, nil)
+		schedulerManager.EXPECT().CreateNewSchedulerVersionAndEnqueueSwitchVersion(gomock.Any(), gomock.Any()).Return(switchOpID, nil)
+		runtime.EXPECT().UpdateScheduler(gomock.Any(), &newScheduler).Times(0)
+		operationsManager.EXPECT().AppendOperationEventToExecutionHistory(gomock.Any(), op, fmt.Sprintf("enqueued switch active version operation with id: %s", switchOpID))
+
+		result := executor.Execute(context.Background(), op, operationDef)
+
+		require.Nil(t, result)
+	})
 }

 func newValidSchedulerWithImageVersion(imageVersion string) *entities.Scheduler {
diff --git a/internal/core/ports/mock/runtime_mock.go b/internal/core/ports/mock/runtime_mock.go
index c70dbc8d8..2cd83809e 100644
--- a/internal/core/ports/mock/runtime_mock.go
+++ b/internal/core/ports/mock/runtime_mock.go
@@ -109,18 +109,18 @@ func (mr *MockRuntimeMockRecorder) DeleteScheduler(ctx, scheduler interface{}) *
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteScheduler", reflect.TypeOf((*MockRuntime)(nil).DeleteScheduler), ctx, scheduler)
 }

-// MitigateDisruption mocks base method.
-func (m *MockRuntime) MitigateDisruption(ctx context.Context, scheduler *entities.Scheduler, roomAmount int, safetyPercentage float64) error {
+// UpdateScheduler mocks base method.
+func (m *MockRuntime) UpdateScheduler(ctx context.Context, scheduler *entities.Scheduler) error {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "MitigateDisruption", ctx, scheduler, roomAmount, safetyPercentage)
+	ret := m.ctrl.Call(m, "UpdateScheduler", ctx, scheduler)
 	ret0, _ := ret[0].(error)
 	return ret0
 }

-// MitigateDisruption indicates an expected call of MitigateDisruption.
-func (mr *MockRuntimeMockRecorder) MitigateDisruption(ctx, scheduler, roomAmount, safetyPercentage interface{}) *gomock.Call {
+// UpdateScheduler indicates an expected call of UpdateScheduler.
+func (mr *MockRuntimeMockRecorder) UpdateScheduler(ctx, scheduler interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MitigateDisruption", reflect.TypeOf((*MockRuntime)(nil).MitigateDisruption), ctx, scheduler, roomAmount, safetyPercentage)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateScheduler", reflect.TypeOf((*MockRuntime)(nil).UpdateScheduler), ctx, scheduler)
 }

 // WatchGameRoomInstances mocks base method.
diff --git a/internal/core/ports/runtime.go b/internal/core/ports/runtime.go
index 39f5e22f0..2a3f64906 100644
--- a/internal/core/ports/runtime.go
+++ b/internal/core/ports/runtime.go
@@ -36,6 +36,8 @@ type Runtime interface {
 	CreateScheduler(ctx context.Context, scheduler *entities.Scheduler) error
 	// DeleteScheduler Deletes a scheduler on the runtime.
 	DeleteScheduler(ctx context.Context, scheduler *entities.Scheduler) error
+	// UpdateScheduler Updates the scheduler on the runtime.
+	UpdateScheduler(ctx context.Context, scheduler *entities.Scheduler) error
 	// CreateGameRoomInstance Creates a game room instance on the runtime using
 	// the specification provided.
 	CreateGameRoomInstance(ctx context.Context, scheduler *entities.Scheduler, gameRoomName string, spec game_room.Spec) (*game_room.Instance, error)
@@ -45,8 +47,6 @@ type Runtime interface {
 	WatchGameRoomInstances(ctx context.Context, scheduler *entities.Scheduler) (RuntimeWatcher, error)
 	// CreateGameRoomName Creates a name to the room.
 	CreateGameRoomName(ctx context.Context, scheduler entities.Scheduler) (string, error)
-	// Apply changes to runtime to mitigate disruptions looking at current number of rooms
-	MitigateDisruption(ctx context.Context, scheduler *entities.Scheduler, roomAmount int, safetyPercentage float64) error
 }

 // RuntimeWatcher defines a process of watcher, it will have a chan with the
diff --git a/internal/core/services/schedulers/patch/patch_scheduler.go b/internal/core/services/schedulers/patch/patch_scheduler.go
index 4824b8889..fea77c679 100644
--- a/internal/core/services/schedulers/patch/patch_scheduler.go
+++ b/internal/core/services/schedulers/patch/patch_scheduler.go
@@ -49,7 +49,8 @@ const (
 	LabelAutoscaling = "autoscaling"
 	// LabelSchedulerForwarders is the forwarders key in the patch map.
 	LabelSchedulerForwarders = "forwarders"
-
+	// LabelPDBMaxUnavailable is the PDB's maxUnavailable spec key in the patch map.
+	LabelPDBMaxUnavailable = "pdbMaxUnavailable"
 	// LabelSpecTerminationGracePeriod is the termination grace period key in the patch map.
 	LabelSpecTerminationGracePeriod = "termination_grace_period"
 	// LabelSpecContainers is the containers key in the patch map.
@@ -114,6 +115,10 @@ func PatchScheduler(scheduler entities.Scheduler, patchMap map[string]interface{
 		}
 	}

+	if _, ok := patchMap[LabelPDBMaxUnavailable]; ok {
+		scheduler.PdbMaxUnavailable = fmt.Sprint(patchMap[LabelPDBMaxUnavailable])
+	}
+
 	if _, ok := patchMap[LabelSchedulerSpec]; ok {
 		var patchSpecMap map[string]interface{}
 		if patchSpecMap, ok = patchMap[LabelSchedulerSpec].(map[string]interface{}); !ok {
diff --git a/internal/core/services/schedulers/scheduler_manager.go b/internal/core/services/schedulers/scheduler_manager.go
index 2b0ebb5f8..94bd2db05 100644
--- a/internal/core/services/schedulers/scheduler_manager.go
+++ b/internal/core/services/schedulers/scheduler_manager.go
@@ -48,17 +48,19 @@ type SchedulerManager struct {
 	schedulerCache   ports.SchedulerCache
 	operationManager ports.OperationManager
 	roomStorage      ports.RoomStorage
+	config           SchedulerManagerConfig
 	logger           *zap.Logger
 }

 var _ ports.SchedulerManager = (*SchedulerManager)(nil)

-func NewSchedulerManager(schedulerStorage ports.SchedulerStorage, schedulerCache ports.SchedulerCache, operationManager ports.OperationManager, roomStorage ports.RoomStorage) *SchedulerManager {
+func NewSchedulerManager(schedulerStorage ports.SchedulerStorage, schedulerCache ports.SchedulerCache, operationManager ports.OperationManager, roomStorage ports.RoomStorage, config SchedulerManagerConfig) *SchedulerManager {
 	return &SchedulerManager{
 		schedulerStorage: schedulerStorage,
 		operationManager: operationManager,
 		schedulerCache:   schedulerCache,
 		roomStorage:      roomStorage,
+		config:           config,
 		logger:           zap.L().With(zap.String(logs.LogFieldComponent, "service"), zap.String(logs.LogFieldServiceName, "scheduler_manager")),
 	}
 }

@@ -89,6 +91,10 @@ func (s *SchedulerManager) CreateScheduler(ctx context.Context, scheduler *entit
 		return nil, fmt.Errorf("failing in creating schedule: %w", err)
 	}

+	if scheduler.PdbMaxUnavailable == "" {
+		scheduler.PdbMaxUnavailable = s.config.DefaultPdbMaxUnavailable
+	}
+
 	err = s.schedulerStorage.CreateScheduler(ctx, scheduler)
 	if err != nil {
 		return nil, err
@@ -158,6 +164,10 @@ func (s *SchedulerManager) PatchSchedulerAndCreateNewSchedulerVersionOperation(c
 		return nil, portsErrors.NewErrInvalidArgument("error patching scheduler: %s", err.Error())
 	}

+	if scheduler.PdbMaxUnavailable == "" {
+		scheduler.PdbMaxUnavailable = s.config.DefaultPdbMaxUnavailable
+	}
+
 	if err := scheduler.Validate(); err != nil {
 		return nil, portsErrors.NewErrInvalidArgument("invalid patched scheduler: %s", err.Error())
 	}
@@ -194,6 +204,9 @@ func (s *SchedulerManager) EnqueueNewSchedulerVersionOperation(ctx context.Conte
 	}
 	scheduler.Spec.Version = currentScheduler.Spec.Version
+	if scheduler.PdbMaxUnavailable == "" {
+		scheduler.PdbMaxUnavailable = s.config.DefaultPdbMaxUnavailable
+	}
 	err = scheduler.Validate()
 	if err != nil {
 		return nil, err
diff --git a/internal/core/worker/config/runtime_watcher_config.go b/internal/core/services/schedulers/scheduler_manager_config.go
similarity index 88%
rename from internal/core/worker/config/runtime_watcher_config.go
rename to internal/core/services/schedulers/scheduler_manager_config.go
index a86c287d5..2a21011eb 100644
--- a/internal/core/worker/config/runtime_watcher_config.go
+++ b/internal/core/services/schedulers/scheduler_manager_config.go
@@ -20,11 +20,8 @@
 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 // SOFTWARE.

-package config
+package schedulers

-import "time"
-
-type RuntimeWatcherConfig struct {
-	DisruptionWorkerIntervalSeconds time.Duration
-	DisruptionSafetyPercentage      float64
+type SchedulerManagerConfig struct {
+	DefaultPdbMaxUnavailable string `validate:"required,pdb_max_unavailable"`
 }
diff --git a/internal/core/services/schedulers/scheduler_manager_test.go b/internal/core/services/schedulers/scheduler_manager_test.go
index fe7bcc020..5d9edfeba 100644
--- a/internal/core/services/schedulers/scheduler_manager_test.go
+++ b/internal/core/services/schedulers/scheduler_manager_test.go
@@ -69,7 +69,7 @@ func TestCreateScheduler(t *testing.T) {
 	operationManager := mock.NewMockOperationManager(mockCtrl)
 	roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 	schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-	schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
+	schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})

 	t.Run("with valid scheduler it returns no error when creating it", func(t *testing.T) {
 		scheduler := newValidScheduler()
@@ -128,7 +128,7 @@ func TestCreateNewSchedulerVersion(t *testing.T) {
 	operationManager := mock.NewMockOperationManager(mockCtrl)
 	roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 	schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-	schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
+	schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})

 	t.Run("with valid scheduler it returns no error when creating it", func(t *testing.T) {
 		scheduler := newValidScheduler()
@@ -174,7 +174,7 @@ func TestEnqueueNewSchedulerVersionOperation(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache,
operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) operationManager.EXPECT().CreateOperation(ctx, scheduler.Name, gomock.Any()).Return(&operation.Operation{}, nil) schedulerStorage.EXPECT().GetScheduler(ctx, scheduler.Name).Return(scheduler, nil) @@ -195,7 +195,7 @@ func TestEnqueueNewSchedulerVersionOperation(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().GetScheduler(ctx, scheduler.Name).Return(scheduler, nil) @@ -213,7 +213,7 @@ func TestEnqueueNewSchedulerVersionOperation(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().GetScheduler(ctx, scheduler.Name).Return(nil, errors.NewErrUnexpected("some_error")) @@ -230,7 +230,7 @@ func TestEnqueueNewSchedulerVersionOperation(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().GetScheduler(ctx, scheduler.Name).Return(scheduler, nil) operationManager.EXPECT().CreateOperation(ctx, scheduler.Name, gomock.Any()).Return(nil, errors.NewErrUnexpected("storage offline")) @@ -262,7 +262,7 @@ func TestEnqueueSwitchActiveVersionOperation(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) operationManager.EXPECT().CreateOperation(ctx, scheduler.Name, gomock.Any()).Return(&operation.Operation{}, nil) @@ -285,7 +285,7 @@ func TestEnqueueSwitchActiveVersionOperation(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) 
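Note: the `SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}` threaded through these tests drives the fallback added to `scheduler_manager.go` earlier in this diff. The following minimal, self-contained sketch shows that behavior; the `package main` scaffolding, the trimmed-down structs, and the `applyDefaultPDB` helper are hypothetical and for illustration only:

```go
// Hypothetical sketch of the DefaultPdbMaxUnavailable fallback shown in
// the scheduler_manager.go hunks above; not the repository's real types.
package main

import "fmt"

type SchedulerManagerConfig struct {
	DefaultPdbMaxUnavailable string
}

type Scheduler struct {
	Name              string
	PdbMaxUnavailable string
}

// applyDefaultPDB is an illustrative helper: an empty PdbMaxUnavailable is
// replaced with the configured default, mirroring the guard the manager
// now applies before persisting or validating a scheduler.
func applyDefaultPDB(s *Scheduler, cfg SchedulerManagerConfig) {
	if s.PdbMaxUnavailable == "" {
		s.PdbMaxUnavailable = cfg.DefaultPdbMaxUnavailable
	}
}

func main() {
	cfg := SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}

	explicit := &Scheduler{Name: "with-pdb", PdbMaxUnavailable: "10%"}
	implicit := &Scheduler{Name: "without-pdb"}

	applyDefaultPDB(explicit, cfg)
	applyDefaultPDB(implicit, cfg)

	fmt.Println(explicit.PdbMaxUnavailable) // 10% (kept as given)
	fmt.Println(implicit.PdbMaxUnavailable) // 5% (default applied)
}
```

The same empty-string guard appears in `CreateScheduler`, `PatchSchedulerAndCreateNewSchedulerVersionOperation`, and `EnqueueNewSchedulerVersionOperation`, so a scheduler that never sets `PdbMaxUnavailable` always ends up with the configured default.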
operationManager.EXPECT().CreateOperation(ctx, scheduler.Name, gomock.Any()).Return(nil, errors.NewErrUnexpected("storage offline")) @@ -314,7 +314,7 @@ func TestDeleteSchedulerOperation(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) operationManager.EXPECT().CreateOperation(ctx, scheduler.Name, opDef).Return(&operation.Operation{}, nil) schedulerCache.EXPECT().GetScheduler(ctx, scheduler.Name).Return(scheduler, nil) @@ -335,7 +335,7 @@ func TestDeleteSchedulerOperation(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) operationManager.EXPECT().CreateOperation(ctx, scheduler.Name, opDef).Return(&operation.Operation{}, nil) schedulerCache.EXPECT().GetScheduler(ctx, scheduler.Name).Return(nil, errors.ErrNotFound) @@ -357,7 +357,7 @@ func TestDeleteSchedulerOperation(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) operationManager.EXPECT().CreateOperation(ctx, scheduler.Name, opDef).Return(nil, errors.NewErrUnexpected("storage offline")) schedulerCache.EXPECT().GetScheduler(ctx, scheduler.Name).Return(scheduler, nil) @@ -377,7 +377,7 @@ func TestDeleteSchedulerOperation(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerCache.EXPECT().GetScheduler(ctx, scheduler.Name).Return(nil, errors.ErrNotFound) schedulerStorage.EXPECT().GetScheduler(ctx, scheduler.Name).Return(nil, errors.ErrNotFound) @@ -401,7 +401,7 @@ func TestGetSchedulerVersions(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().GetSchedulerVersions(ctx, 
scheduler.Name).Return(schedulerVersionList, nil) @@ -419,7 +419,7 @@ func TestGetSchedulerVersions(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().GetSchedulerVersions(ctx, scheduler.Name).Return(nil, errors.NewErrNotFound("scheduler not found")) @@ -440,7 +440,7 @@ func TestGetSchedulerByVersion(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().GetSchedulerWithFilter(ctx, &filters.SchedulerFilter{ Name: scheduler.Name, @@ -460,7 +460,7 @@ func TestGetSchedulerByVersion(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().GetSchedulerWithFilter(ctx, &filters.SchedulerFilter{ Name: scheduler.Name, @@ -484,7 +484,7 @@ func TestGetScheduler(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerFilter := &filters.SchedulerFilter{ Name: scheduler.Name, @@ -506,7 +506,7 @@ func TestGetScheduler(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerFilter := &filters.SchedulerFilter{ Name: scheduler.Name, @@ -533,7 +533,7 @@ func TestGetSchedulersWithFilter(t *testing.T) { roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulers := []*entities.Scheduler{scheduler} schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) 
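Note: the `"5%"` values used throughout these tests must satisfy `IsPdbMaxUnavailableValid`, added to `internal/core/validations/validations.go` further down in this diff. A standalone sketch of its semantics, assuming the same `strconv`/`strings` approach as the real function (the `package main` wrapper and lowercase helper name are illustrative only):

```go
// Illustrative restatement of the IsPdbMaxUnavailableValid rules from the
// validations.go hunk below: "" is valid (a default is applied later),
// "N%" requires 0 < N < 100, and a plain integer must be greater than 0.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func isPdbMaxUnavailableValid(value string) bool {
	if value == "" {
		return true
	}
	if strings.HasSuffix(value, "%") {
		p, err := strconv.Atoi(strings.TrimSuffix(value, "%"))
		return err == nil && p > 0 && p < 100
	}
	n, err := strconv.Atoi(value)
	return err == nil && n > 0
}

func main() {
	for _, v := range []string{"", "5%", "10", "0", "100%", "abc"} {
		fmt.Printf("%q -> %v\n", v, isPdbMaxUnavailableValid(v))
	}
}
```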
schedulerStorage.EXPECT().GetSchedulersWithFilter(ctx, gomock.Any()).Return(schedulers, nil) @@ -550,7 +550,7 @@ func TestGetSchedulersWithFilter(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().GetSchedulersWithFilter(ctx, gomock.Any()).Return(nil, errors.NewErrUnexpected("some error")) @@ -576,7 +576,7 @@ func TestUpdateScheduler(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().UpdateScheduler(ctx, scheduler).Return(nil) schedulerCache.EXPECT().DeleteScheduler(ctx, scheduler.Name).Return(nil) @@ -594,7 +594,7 @@ func TestUpdateScheduler(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().UpdateScheduler(ctx, scheduler).Return(nil) schedulerCache.EXPECT().DeleteScheduler(ctx, scheduler.Name).Return(errors.NewErrUnexpected("error")) @@ -612,7 +612,7 @@ func TestUpdateScheduler(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().UpdateScheduler(ctx, scheduler).Return(errors.NewErrUnexpected("error")) @@ -627,7 +627,7 @@ func TestUpdateScheduler(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) err := schedulerManager.CreateNewSchedulerVersion(ctx, scheduler) @@ -641,7 +641,7 @@ func TestGetSchedulersInfo(t *testing.T) { ctx := context.Background() schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, roomStorage) + schedulerManager := 
NewSchedulerManager(schedulerStorage, nil, nil, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerFilter := filters.SchedulerFilter{Game: "Tennis-Clash"} scheduler := newValidScheduler() schedulers := []*entities.Scheduler{scheduler} @@ -666,7 +666,7 @@ func TestGetSchedulersInfo(t *testing.T) { ctx := context.Background() schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerFilter := filters.SchedulerFilter{Game: "Tennis-Clash"} schedulerStorage.EXPECT().GetSchedulersWithFilter(gomock.Any(), gomock.Any()).Return(nil, errors.NewErrNotFound("err")) @@ -682,7 +682,7 @@ func TestGetSchedulersInfo(t *testing.T) { ctx := context.Background() schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, roomStorage) + schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerFilter := filters.SchedulerFilter{Game: "Tennis-Clash"} scheduler := newValidScheduler() schedulers := []*entities.Scheduler{scheduler} @@ -701,7 +701,7 @@ func TestNewSchedulerInfo(t *testing.T) { t.Run("with valid request it returns a scheduler and game rooms information (no autoscaling)", func(t *testing.T) { ctx := context.Background() roomStorage := mockports.NewMockRoomStorage(mockCtrl) - schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage) + schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(5, nil) roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(10, nil) roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(15, nil) @@ -721,7 +721,7 @@ func TestNewSchedulerInfo(t *testing.T) { t.Run("with valid request it returns a scheduler and game rooms information and autoscaling", func(t *testing.T) { ctx := context.Background() roomStorage := mockports.NewMockRoomStorage(mockCtrl) - schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage) + schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(5, nil) roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(10, nil) roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(15, nil) @@ -750,7 +750,7 @@ func TestNewSchedulerInfo(t *testing.T) { t.Run("it returns with error when couldn't get game rooms information in ready state", func(t *testing.T) { ctx := context.Background() roomStorage := mockports.NewMockRoomStorage(mockCtrl) - schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage) + schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(0, errors.NewErrUnexpected("err")) scheduler := 
newValidScheduler() @@ -763,7 +763,7 @@ func TestNewSchedulerInfo(t *testing.T) { t.Run("it returns with error when couldn't get game rooms information in pending state", func(t *testing.T) { ctx := context.Background() roomStorage := mockports.NewMockRoomStorage(mockCtrl) - schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage) + schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(5, nil) roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(0, errors.NewErrUnexpected("err")) scheduler := newValidScheduler() @@ -777,7 +777,7 @@ func TestNewSchedulerInfo(t *testing.T) { t.Run("it returns with error when couldn't get game rooms information in occupied state", func(t *testing.T) { ctx := context.Background() roomStorage := mockports.NewMockRoomStorage(mockCtrl) - schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage) + schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(5, nil) roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(5, nil) roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(0, errors.NewErrUnexpected("err")) @@ -792,7 +792,7 @@ func TestNewSchedulerInfo(t *testing.T) { t.Run("it returns with error when couldn't get game rooms information in terminating state", func(t *testing.T) { ctx := context.Background() roomStorage := mockports.NewMockRoomStorage(mockCtrl) - schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage) + schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(5, nil) roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(5, nil) roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(5, nil) @@ -813,7 +813,7 @@ func TestDeleteScheduler(t *testing.T) { scheduler := newValidScheduler() ctx := context.Background() schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, nil) + schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, nil, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().GetScheduler(gomock.Any(), schedulerName).Return(scheduler, nil) schedulerStorage.EXPECT().DeleteScheduler(gomock.Any(), ports.TransactionID(""), scheduler).Return(nil) @@ -826,7 +826,7 @@ func TestDeleteScheduler(t *testing.T) { schedulerName := "scheduler-name" ctx := context.Background() schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, nil) + schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, nil, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) schedulerStorage.EXPECT().GetScheduler(gomock.Any(), schedulerName).Return(nil, errors.NewErrNotFound("err")) err := schedulerManager.DeleteScheduler(ctx, schedulerName) @@ -841,7 +841,7 @@ func TestDeleteScheduler(t *testing.T) { scheduler := newValidScheduler() ctx := context.Background() schedulerStorage := 
mockports.NewMockSchedulerStorage(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, nil)
+		schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, nil, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})

 		schedulerStorage.EXPECT().GetScheduler(gomock.Any(), schedulerName).Return(scheduler, nil)
 		schedulerStorage.EXPECT().DeleteScheduler(gomock.Any(), ports.TransactionID(""), scheduler).Return(errors.NewErrUnexpected("err"))
@@ -887,6 +887,7 @@ func TestPatchSchedulerAndSwitchActiveVersionOperation(t *testing.T) {
 				GetSchedulerError: nil,
 				ChangedSchedulerFunction: func() *entities.Scheduler {
 					scheduler.MaxSurge = "12%"
+					scheduler.PdbMaxUnavailable = "5%"
 					return scheduler
 				},
 				CreateOperationReturn: &operation.Operation{ID: "some-id"},
@@ -971,6 +972,7 @@ func TestPatchSchedulerAndSwitchActiveVersionOperation(t *testing.T) {
 				GetSchedulerError: nil,
 				ChangedSchedulerFunction: func() *entities.Scheduler {
 					scheduler.MaxSurge = "17%"
+					scheduler.PdbMaxUnavailable = "5%"
 					return scheduler
 				},
 				CreateOperationReturn: nil,
@@ -1013,7 +1015,7 @@ func TestPatchSchedulerAndSwitchActiveVersionOperation(t *testing.T) {
 			mockCtrl := gomock.NewController(t)
 			mockOperationManager := mock.NewMockOperationManager(mockCtrl)
 			mockSchedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl)
-			schedulerManager := NewSchedulerManager(mockSchedulerStorage, nil, mockOperationManager, nil)
+			schedulerManager := NewSchedulerManager(mockSchedulerStorage, nil, mockOperationManager, nil, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})

 			mockSchedulerStorage.EXPECT().GetScheduler(gomock.Any(), scheduler.Name).Return(scheduler, testCase.ExpectedMock.GetSchedulerError)
 			mockOperationManager.EXPECT().
diff --git a/internal/core/validations/validations.go b/internal/core/validations/validations.go
index 382d78d9a..cbd9964aa 100644
--- a/internal/core/validations/validations.go
+++ b/internal/core/validations/validations.go
@@ -74,6 +74,31 @@ func IsMaxSurgeValid(maxSurge string) bool {
 	return true
 }

+// IsPdbMaxUnavailableValid checks if PdbMaxUnavailable is valid. A valid PdbMaxUnavailable is either
+// a string that can be converted to a number greater than 0, or a percentage string whose
+// value is greater than 0 and less than 100. Empty strings are valid; we'll use the default value in
+// SchedulerManager.
+func IsPdbMaxUnavailableValid(pdbMaxUnavailable string) bool {
+	if pdbMaxUnavailable == "" {
+		return true
+	}
+
+	if strings.HasSuffix(pdbMaxUnavailable, "%") {
+		percentageValue, err := strconv.Atoi(strings.TrimSuffix(pdbMaxUnavailable, "%"))
+		if err != nil || percentageValue <= 0 || percentageValue >= 100 {
+			return false
+		}
+		return true
+	}
+
+	numericValue, err := strconv.Atoi(pdbMaxUnavailable)
+	if err != nil || numericValue <= 0 {
+		return false
+	}
+
+	return true
+}
+
 // IsImagePullPolicySupported check if received policy is supported by maestro
 func IsImagePullPolicySupported(policy string) bool {
 	policies := []string{"Always", "Never", "IfNotPresent"}
diff --git a/internal/core/worker/runtimewatcher/runtime_watcher_worker.go b/internal/core/worker/runtimewatcher/runtime_watcher_worker.go
index fba846e10..b49e250d4 100644
--- a/internal/core/worker/runtimewatcher/runtime_watcher_worker.go
+++ b/internal/core/worker/runtimewatcher/runtime_watcher_worker.go
@@ -26,10 +26,8 @@ import (
 	"context"
 	"fmt"
 	"sync"
-	"time"

 	"github.com/topfreegames/maestro/internal/core/logs"
-	"github.com/topfreegames/maestro/internal/core/worker/config"

 	"github.com/topfreegames/maestro/internal/core/entities"
 	"github.com/topfreegames/maestro/internal/core/entities/game_room"
@@ -54,14 +52,12 @@ const (
 type runtimeWatcherWorker struct {
 	scheduler   *entities.Scheduler
 	roomManager ports.RoomManager
-	roomStorage ports.RoomStorage
 	// TODO(gabrielcorado): should we access the port directly? do we need to
 	// provide the same `Watcher` interface but on the RoomManager?
 	runtime         ports.Runtime
 	logger          *zap.Logger
 	ctx             context.Context
 	cancelFunc      context.CancelFunc
-	config          *config.RuntimeWatcherConfig
 	workerWaitGroup *sync.WaitGroup
 }

@@ -69,11 +65,9 @@ func NewRuntimeWatcherWorker(scheduler *entities.Scheduler, opts *worker.WorkerO
 	return &runtimeWatcherWorker{
 		scheduler:       scheduler,
 		roomManager:     opts.RoomManager,
-		roomStorage:     opts.RoomStorage,
 		runtime:         opts.Runtime,
 		logger:          zap.L().With(zap.String(logs.LogFieldServiceName, WorkerName), zap.String(logs.LogFieldSchedulerName, scheduler.Name)),
 		workerWaitGroup: &sync.WaitGroup{},
-		config:          opts.RuntimeWatcherConfig,
 	}
 }

@@ -107,77 +101,6 @@ func (w *runtimeWatcherWorker) spawnUpdateRoomWatchers(resultChan chan game_room
 	}
 }

-func (w *runtimeWatcherWorker) mitigateDisruptions() error {
-	totalRoomsAmount, err := w.roomStorage.GetRoomCount(w.ctx, w.scheduler.Name)
-	if err != nil {
-		w.logger.Error(
-			"failed to get total rooms amount for scheduler",
-			zap.String("scheduler", w.scheduler.Name),
-			zap.Error(err),
-		)
-		return nil
-	}
-	mitigationQuota := 0
-	if totalRoomsAmount >= MinRoomsToApplyDisruption {
-		mitigationQuota, err = w.roomStorage.GetRoomCountByStatus(w.ctx, w.scheduler.Name, game_room.GameStatusOccupied)
-		if err != nil {
-			w.logger.Error(
-				"failed to get occupied rooms for scheduler",
-				zap.String("scheduler", w.scheduler.Name),
-				zap.Error(err),
-			)
-			return nil
-		}
-	}
-	err = w.runtime.MitigateDisruption(w.ctx, w.scheduler, mitigationQuota, w.config.DisruptionSafetyPercentage)
-	if err != nil {
-		return err
-	}
-	w.logger.Debug(
-		"mitigated disruption for occupied rooms",
-		zap.String("scheduler", w.scheduler.Name),
-		zap.Int("mitigationQuota", mitigationQuota),
-	)
-
-	return nil
-}
-
-func (w *runtimeWatcherWorker) spawnDisruptionWatcher() {
-	w.workerWaitGroup.Add(1)
-
-	go func() {
-		defer w.workerWaitGroup.Done()
-		ticker := time.NewTicker(time.Second * w.config.DisruptionWorkerIntervalSeconds)
w.config.DisruptionWorkerIntervalSeconds) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - err := w.mitigateDisruptions() - if err != nil { - w.logger.Error( - "unrecoverable error mitigating disruption", - zap.String("scheduler", w.scheduler.Name), - zap.Error(err), - ) - return - } - case <-w.ctx.Done(): - w.logger.Info("context closed, exiting disruption watcher") - return - } - } - - }() -} - -func (w *runtimeWatcherWorker) spawnWatchers( - resultChan chan game_room.InstanceEvent, -) { - w.spawnUpdateRoomWatchers(resultChan) - w.spawnDisruptionWatcher() -} - func (w *runtimeWatcherWorker) Start(ctx context.Context) error { w.logger.Info("starting runtime watcher", zap.String("scheduler", w.scheduler.Name)) watcher, err := w.runtime.WatchGameRoomInstances(ctx, w.scheduler) @@ -188,7 +111,7 @@ func (w *runtimeWatcherWorker) Start(ctx context.Context) error { w.ctx, w.cancelFunc = context.WithCancel(ctx) defer w.cancelFunc() - w.spawnWatchers(watcher.ResultChan()) + w.spawnUpdateRoomWatchers(watcher.ResultChan()) w.logger.Info("spawned all goroutines", zap.String("scheduler", w.scheduler.Name)) w.workerWaitGroup.Wait() diff --git a/internal/core/worker/runtimewatcher/runtime_watcher_worker_test.go b/internal/core/worker/runtimewatcher/runtime_watcher_worker_test.go index b7d0d3303..f92fa70d0 100644 --- a/internal/core/worker/runtimewatcher/runtime_watcher_worker_test.go +++ b/internal/core/worker/runtimewatcher/runtime_watcher_worker_test.go @@ -37,26 +37,19 @@ import ( "github.com/topfreegames/maestro/internal/core/entities" "github.com/topfreegames/maestro/internal/core/entities/game_room" porterrors "github.com/topfreegames/maestro/internal/core/ports/errors" - "github.com/topfreegames/maestro/internal/core/ports/mock" mockports "github.com/topfreegames/maestro/internal/core/ports/mock" "github.com/topfreegames/maestro/internal/core/worker" - "github.com/topfreegames/maestro/internal/core/worker/config" ) -func workerOptions(t *testing.T) (*gomock.Controller, *mockports.MockRuntime, *mockports.MockRoomManager, *mockports.MockRoomStorage, *worker.WorkerOptions) { +func workerOptions(t *testing.T) (*gomock.Controller, *mockports.MockRuntime, *mockports.MockRoomManager, *worker.WorkerOptions) { mockCtrl := gomock.NewController(t) runtime := mockports.NewMockRuntime(mockCtrl) roomManager := mockports.NewMockRoomManager(mockCtrl) - roomStorage := mock.NewMockRoomStorage(mockCtrl) - return mockCtrl, runtime, roomManager, roomStorage, &worker.WorkerOptions{ + return mockCtrl, runtime, roomManager, &worker.WorkerOptions{ Runtime: runtime, RoomManager: roomManager, - RoomStorage: roomStorage, - RuntimeWatcherConfig: &config.RuntimeWatcherConfig{ - DisruptionWorkerIntervalSeconds: 1, - }, } } @@ -68,7 +61,7 @@ func TestRuntimeWatcher_Start(t *testing.T) { for _, event := range events { t.Run(fmt.Sprintf("when %s happens, updates instance", event.String()), func(t *testing.T) { - mockCtrl, runtime, roomManager, _, workerOptions := workerOptions(t) + mockCtrl, runtime, roomManager, workerOptions := workerOptions(t) scheduler := &entities.Scheduler{Name: "test"} watcher := NewRuntimeWatcherWorker(scheduler, workerOptions) @@ -109,7 +102,7 @@ func TestRuntimeWatcher_Start(t *testing.T) { }) t.Run(fmt.Sprintf("when %s happens, and update instance fails, does nothing", event.String()), func(t *testing.T) { - mockCtrl, runtime, roomManager, _, workerOptions := workerOptions(t) + mockCtrl, runtime, roomManager, workerOptions := workerOptions(t) scheduler := &entities.Scheduler{Name: 
"test"} watcher := NewRuntimeWatcherWorker(scheduler, workerOptions) @@ -151,7 +144,7 @@ func TestRuntimeWatcher_Start(t *testing.T) { } t.Run("fails to start watcher", func(t *testing.T) { - _, runtime, _, _, workerOptions := workerOptions(t) + _, runtime, _, workerOptions := workerOptions(t) scheduler := &entities.Scheduler{Name: "test"} watcher := NewRuntimeWatcherWorker(scheduler, workerOptions) @@ -174,7 +167,7 @@ func TestRuntimeWatcher_Start(t *testing.T) { }) t.Run("clean room state on delete event", func(t *testing.T) { - mockCtrl, runtime, roomManager, _, workerOptions := workerOptions(t) + mockCtrl, runtime, roomManager, workerOptions := workerOptions(t) scheduler := &entities.Scheduler{Name: "test"} watcher := NewRuntimeWatcherWorker(scheduler, workerOptions) @@ -213,7 +206,7 @@ func TestRuntimeWatcher_Start(t *testing.T) { }) t.Run("when clean room state fails, does nothing", func(t *testing.T) { - mockCtrl, runtime, roomManager, _, workerOptions := workerOptions(t) + mockCtrl, runtime, roomManager, workerOptions := workerOptions(t) scheduler := &entities.Scheduler{Name: "test"} watcher := NewRuntimeWatcherWorker(scheduler, workerOptions) @@ -252,7 +245,7 @@ func TestRuntimeWatcher_Start(t *testing.T) { }) t.Run("when resultChan is closed, worker stops without error", func(t *testing.T) { - mockCtrl, runtime, _, roomStorage, workerOptions := workerOptions(t) + mockCtrl, runtime, _, workerOptions := workerOptions(t) scheduler := &entities.Scheduler{Name: "test"} watcher := NewRuntimeWatcherWorker(scheduler, workerOptions) @@ -263,11 +256,6 @@ func TestRuntimeWatcher_Start(t *testing.T) { runtimeWatcher.EXPECT().ResultChan().Return(resultChan) runtimeWatcher.EXPECT().Stop() - runtime.EXPECT().MitigateDisruption(gomock.Any(), gomock.Any(), 0, 0.0).Return(nil).MinTimes(0) - roomStorage.EXPECT().GetRoomCount(gomock.Any(), gomock.Any()).Return(MinRoomsToApplyDisruption, nil).MinTimes(0) - // We only call GetRoomCountByStatus to apply mitigation if there is more than 1 room - roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(0, nil).MinTimes(0) - ctx, cancelFunc := context.WithCancel(context.Background()) go func() { diff --git a/internal/core/worker/worker.go b/internal/core/worker/worker.go index 2f25dd533..1ed3ab837 100644 --- a/internal/core/worker/worker.go +++ b/internal/core/worker/worker.go @@ -55,7 +55,6 @@ type WorkerOptions struct { RoomStorage ports.RoomStorage InstanceStorage ports.GameRoomInstanceStorage MetricsReporterConfig *config.MetricsReporterConfig - RuntimeWatcherConfig *config.RuntimeWatcherConfig } // Configuration holds all worker configuration parameters. diff --git a/internal/service/adapters.go b/internal/service/adapters.go index ed5d6c104..ae3f8bd5f 100644 --- a/internal/service/adapters.go +++ b/internal/service/adapters.go @@ -98,8 +98,8 @@ const ( ) // NewSchedulerManager instantiates a new scheduler manager. 
-func NewSchedulerManager(schedulerStorage ports.SchedulerStorage, schedulerCache ports.SchedulerCache, operationManager ports.OperationManager, roomStorage ports.RoomStorage) ports.SchedulerManager {
- return schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
+func NewSchedulerManager(schedulerStorage ports.SchedulerStorage, schedulerCache ports.SchedulerCache, operationManager ports.OperationManager, roomStorage ports.RoomStorage, config schedulers.SchedulerManagerConfig) ports.SchedulerManager {
+ return schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, config)
}
// NewOperationManager instantiates a new operation manager
diff --git a/internal/service/config.go b/internal/service/config.go
index 7f7fa67e5..cefb42711 100644
--- a/internal/service/config.go
+++ b/internal/service/config.go
@@ -25,12 +25,17 @@ package service import ( "time"
+ "github.com/topfreegames/maestro/internal/core/entities/pdb"
+ "github.com/topfreegames/maestro/internal/core/logs"
"github.com/topfreegames/maestro/internal/core/operations/rooms/add" "github.com/topfreegames/maestro/internal/core/operations/schedulers/newversion" "github.com/topfreegames/maestro/internal/core/services/events" operationmanager "github.com/topfreegames/maestro/internal/core/services/operations" roommanager "github.com/topfreegames/maestro/internal/core/services/rooms"
+ schedulerManager "github.com/topfreegames/maestro/internal/core/services/schedulers"
"github.com/topfreegames/maestro/internal/core/services/workers"
+ "github.com/topfreegames/maestro/internal/validations"
+ "go.uber.org/zap"
"github.com/topfreegames/maestro/internal/core/operations/healthcontroller" "github.com/topfreegames/maestro/internal/core/worker"
@@ -48,6 +53,7 @@ const ( operationLeaseTTLMillisConfigPath = "services.operationManager.operationLeaseTTLMillis" schedulerCacheTTLMillisConfigPath = "services.eventsForwarder.schedulerCacheTTLMillis" operationsRoomsAddLimitConfigPath = "operations.rooms.add.limit"
+ schedulerDefaultPdbMaxUnavailablePath = "services.schedulerManager.defaultPdbMaxUnavailable"
)
// NewCreateSchedulerVersionConfig instantiates a new CreateSchedulerVersionConfig to be used by the NewSchedulerVersion operation to customize its configuration.
@@ -105,6 +111,21 @@ func NewRoomManagerConfig(c config.Config) (roommanager.RoomManagerConfig, error) return roomManagerConfig, nil }
+// NewSchedulerManagerConfig instantiates a new SchedulerManagerConfig to be used by the SchedulerManager to customize its configuration.
+func NewSchedulerManagerConfig(c config.Config) (schedulerManager.SchedulerManagerConfig, error) {
+ schedulerConfig := schedulerManager.SchedulerManagerConfig{
+ DefaultPdbMaxUnavailable: c.GetString(schedulerDefaultPdbMaxUnavailablePath),
+ }
+ if err := validations.Validate.Struct(schedulerConfig); err != nil {
+ zap.L().With(zap.String(logs.LogFieldComponent, "service"), zap.String(logs.LogFieldServiceName, "config")).Info(
+ "error parsing default pdb max unavailable, using default",
+ zap.String("defaultPdbMaxUnavailable", pdb.DefaultPdbMaxUnavailablePercentage),
+ )
+ schedulerConfig.DefaultPdbMaxUnavailable = pdb.DefaultPdbMaxUnavailablePercentage
+ }
+ return schedulerConfig, nil
+}
+
// NewWorkersConfig instantiates a new workers Config structure to be used by the workers to customize them from the config package.
func NewWorkersConfig(c config.Config) (worker.Configuration, error) { healthControllerExecutionInterval := c.GetDuration(healthControllerExecutionIntervalConfigPath) diff --git a/internal/validations/validations.go b/internal/validations/validations.go index df53035ba..8e93783ca 100644 --- a/internal/validations/validations.go +++ b/internal/validations/validations.go @@ -81,6 +81,12 @@ func RegisterValidations() error { } addTranslation(Validate, "max_surge", "{0} must be a number greater than zero or a number greater than zero with suffix '%'") + err = Validate.RegisterValidation("pdb_max_unavailable", pdbMaxUnavailableValidate) + if err != nil { + return errors.New("could not register pdbMaxUnavailableValidate") + } + addTranslation(Validate, "pdb_max_unavailable", "{0} must be either an empty string (accept default value), a number greater than zero or a percentage greater than zero and less than 100 with suffix '%'") + err = Validate.RegisterValidation("kube_resource_name", kubeResourceNameValidate) if err != nil { return errors.New("could not register kubeResourceNameValidate") @@ -140,6 +146,10 @@ func maxSurgeValidate(fl validator.FieldLevel) bool { return validations.IsMaxSurgeValid(fl.Field().String()) } +func pdbMaxUnavailableValidate(fl validator.FieldLevel) bool { + return validations.IsPdbMaxUnavailableValid(fl.Field().String()) +} + func semanticValidate(fl validator.FieldLevel) bool { return validations.IsVersionValid(fl.Field().String()) } diff --git a/pkg/api/v1/messages.pb.go b/pkg/api/v1/messages.pb.go index 4bbc6f264..4caa0a891 100644 --- a/pkg/api/v1/messages.pb.go +++ b/pkg/api/v1/messages.pb.go @@ -875,10 +875,12 @@ type Scheduler struct { Autoscaling *Autoscaling `protobuf:"bytes,9,opt,name=autoscaling,proto3,oneof" json:"autoscaling,omitempty"` // List of Scheduler forwarders Forwarders []*Forwarder `protobuf:"bytes,10,rep,name=forwarders,proto3" json:"forwarders,omitempty"` + // PDB MaxUnavailable Specification + PdbMaxUnavailable string `protobuf:"bytes,11,opt,name=pdb_max_unavailable,json=pdbMaxUnavailable,proto3" json:"pdb_max_unavailable,omitempty"` // List with annotations - Annotations map[string]string `protobuf:"bytes,11,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Annotations map[string]string `protobuf:"bytes,12,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // List with labels - Labels map[string]string `protobuf:"bytes,12,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Labels map[string]string `protobuf:"bytes,13,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *Scheduler) Reset() { @@ -983,6 +985,13 @@ func (x *Scheduler) GetForwarders() []*Forwarder { return nil } +func (x *Scheduler) GetPdbMaxUnavailable() string { + if x != nil { + return x.PdbMaxUnavailable + } + return "" +} + func (x *Scheduler) GetAnnotations() map[string]string { if x != nil { return x.Annotations @@ -2320,7 +2329,7 @@ var file_api_v1_messages_proto_rawDesc = []byte{ 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x67, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x74, 0x6f, 0x6c, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x0a, 
0x09, 0x5f, 0x61, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x22, - 0x93, 0x05, 0x0a, 0x09, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0xc3, 0x05, 0x0a, 0x09, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, @@ -2344,12 +2353,15 @@ var file_api_v1_messages_proto_rawDesc = []byte{ 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x0a, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, - 0x72, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x44, 0x0a, - 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x03, + 0x72, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, + 0x13, 0x70, 0x64, 0x62, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x75, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, + 0x61, 0x62, 0x6c, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x64, 0x62, 0x4d, + 0x61, 0x78, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x44, 0x0a, + 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0c, 0x20, + 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, diff --git a/pkg/api/v1/schedulers.pb.go b/pkg/api/v1/schedulers.pb.go index 9f19c4c92..b65ab01f0 100644 --- a/pkg/api/v1/schedulers.pb.go +++ b/pkg/api/v1/schedulers.pb.go @@ -209,10 +209,12 @@ type CreateSchedulerRequest struct { Autoscaling *Autoscaling `protobuf:"bytes,7,opt,name=autoscaling,proto3,oneof" json:"autoscaling,omitempty"` // List of Scheduler forwarders Forwarders []*Forwarder `protobuf:"bytes,8,rep,name=forwarders,proto3" json:"forwarders,omitempty"` + // PDB MaxUnavailable Specification + PdbMaxUnavailable string `protobuf:"bytes,9,opt,name=pdb_max_unavailable,json=pdbMaxUnavailable,proto3" json:"pdb_max_unavailable,omitempty"` // Add annotations for scheduler - Annotations map[string]string `protobuf:"bytes,9,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Annotations map[string]string `protobuf:"bytes,10,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Add labels for scheduler - Labels map[string]string `protobuf:"bytes,10,rep,name=labels,proto3" 
json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Labels map[string]string `protobuf:"bytes,11,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *CreateSchedulerRequest) Reset() { @@ -303,6 +305,13 @@ func (x *CreateSchedulerRequest) GetForwarders() []*Forwarder { return nil } +func (x *CreateSchedulerRequest) GetPdbMaxUnavailable() string { + if x != nil { + return x.PdbMaxUnavailable + } + return "" +} + func (x *CreateSchedulerRequest) GetAnnotations() map[string]string { if x != nil { return x.Annotations @@ -446,10 +455,12 @@ type NewSchedulerVersionRequest struct { Autoscaling *Autoscaling `protobuf:"bytes,7,opt,name=autoscaling,proto3,oneof" json:"autoscaling,omitempty"` // List of Scheduler forwarders Forwarders []*Forwarder `protobuf:"bytes,8,rep,name=forwarders,proto3" json:"forwarders,omitempty"` + // PDB MaxUnavailable Specification + PdbMaxUnavailable string `protobuf:"bytes,9,opt,name=pdb_max_unavailable,json=pdbMaxUnavailable,proto3" json:"pdb_max_unavailable,omitempty"` // New annotations for scheduler - Annotations map[string]string `protobuf:"bytes,9,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Annotations map[string]string `protobuf:"bytes,10,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // New labels for scheduler - Labels map[string]string `protobuf:"bytes,10,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Labels map[string]string `protobuf:"bytes,11,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *NewSchedulerVersionRequest) Reset() { @@ -540,6 +551,13 @@ func (x *NewSchedulerVersionRequest) GetForwarders() []*Forwarder { return nil } +func (x *NewSchedulerVersionRequest) GetPdbMaxUnavailable() string { + if x != nil { + return x.PdbMaxUnavailable + } + return "" +} + func (x *NewSchedulerVersionRequest) GetAnnotations() map[string]string { if x != nil { return x.Annotations @@ -623,10 +641,12 @@ type PatchSchedulerRequest struct { Autoscaling *OptionalAutoscaling `protobuf:"bytes,6,opt,name=autoscaling,proto3,oneof" json:"autoscaling,omitempty"` // List of Scheduler forwarders Forwarders []*Forwarder `protobuf:"bytes,7,rep,name=forwarders,proto3" json:"forwarders,omitempty"` + // PDB MaxUnavailable Specification + PdbMaxUnavailable string `protobuf:"bytes,8,opt,name=pdb_max_unavailable,json=pdbMaxUnavailable,proto3" json:"pdb_max_unavailable,omitempty"` // Annotations declaration for the scheduler - Annotations map[string]string `protobuf:"bytes,8,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Annotations map[string]string `protobuf:"bytes,9,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Labels declaration for the scheduler - Labels map[string]string `protobuf:"bytes,9,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Labels 
map[string]string `protobuf:"bytes,10,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *PatchSchedulerRequest) Reset() { @@ -710,6 +730,13 @@ func (x *PatchSchedulerRequest) GetForwarders() []*Forwarder { return nil } +func (x *PatchSchedulerRequest) GetPdbMaxUnavailable() string { + if x != nil { + return x.PdbMaxUnavailable + } + return "" +} + func (x *PatchSchedulerRequest) GetAnnotations() map[string]string { if x != nil { return x.Annotations @@ -1218,7 +1245,7 @@ var file_api_v1_schedulers_proto_rawDesc = []byte{ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x09, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x22, 0xfd, 0x04, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, + 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x22, 0xad, 0x05, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x61, 0x6d, 0x65, 0x18, 0x02, @@ -1239,13 +1266,16 @@ var file_api_v1_schedulers_proto_rawDesc = []byte{ 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x0a, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x77, - 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x51, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x61, 0x70, + 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x70, 0x64, 0x62, 0x5f, 0x6d, 0x61, + 0x78, 0x5f, 0x75, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x64, 0x62, 0x4d, 0x61, 0x78, 0x55, 0x6e, 0x61, 0x76, 0x61, + 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x51, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x42, 0x0a, 0x06, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x65, 0x6c, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x3e, 0x0a, @@ -1269,7 +1299,7 @@ var file_api_v1_schedulers_proto_rawDesc = []byte{ 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x11, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, - 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x22, 0xf5, 0x04, 0x0a, 0x1a, 0x4e, + 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x22, 0xa5, 0x05, 0x0a, 0x1a, 0x4e, 0x65, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, @@ -1290,13 +1320,16 @@ var file_api_v1_schedulers_proto_rawDesc = []byte{ 0x6e, 0x67, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x0a, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x66, 0x6f, - 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, + 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x70, 0x64, 0x62, 0x5f, + 0x6d, 0x61, 0x78, 0x5f, 0x75, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x64, 0x62, 0x4d, 0x61, 0x78, 0x55, 0x6e, 0x61, + 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x55, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x65, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x46, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x46, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x65, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, @@ -1313,7 +1346,7 @@ var file_api_v1_schedulers_proto_rawDesc = []byte{ 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x64, 0x22, 0xaf, 0x05, 0x0a, 0x15, 0x50, 0x61, 0x74, 0x63, 0x68, 0x53, 0x63, + 0x6f, 0x6e, 0x49, 0x64, 0x22, 0xdf, 0x05, 0x0a, 0x15, 0x50, 0x61, 0x74, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, @@ -1334,13 +1367,16 @@ var file_api_v1_schedulers_proto_rawDesc = []byte{ 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x0a, 0x66, 0x6f, 0x72, 0x77, 0x61, 
0x72, 0x64, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65,
- 0x72, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x50, 0x0a,
- 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03,
+ 0x72, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a,
+ 0x13, 0x70, 0x64, 0x62, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x75, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c,
+ 0x61, 0x62, 0x6c, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x64, 0x62, 0x4d,
+ 0x61, 0x78, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x50, 0x0a,
+ 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
- 0x41, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x41, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x29, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65,
diff --git a/proto/api/v1/messages.proto b/proto/api/v1/messages.proto
index 0f6e267ec..07aab4be3 100644
--- a/proto/api/v1/messages.proto
+++ b/proto/api/v1/messages.proto
@@ -179,10 +179,12 @@ message Scheduler { optional Autoscaling autoscaling = 9; // List of Scheduler forwarders repeated Forwarder forwarders = 10;
+ // PDB MaxUnavailable Specification
+ string pdb_max_unavailable = 11;
// List with annotations
- map<string, string> annotations = 11;
+ map<string, string> annotations = 12;
// List with labels
- map<string, string> labels = 12;
+ map<string, string> labels = 13;
}
// Scheduler message used in the "ListScheduler version" definition. The "spec" is not implemented
diff --git a/proto/api/v1/schedulers.proto b/proto/api/v1/schedulers.proto
index c855df3f7..d7dfcbc42 100644
--- a/proto/api/v1/schedulers.proto
+++ b/proto/api/v1/schedulers.proto
@@ -123,10 +123,12 @@ message CreateSchedulerRequest { optional Autoscaling autoscaling = 7; // List of Scheduler forwarders repeated Forwarder forwarders = 8;
+ // PDB MaxUnavailable Specification
+ string pdb_max_unavailable = 9;
// Add annotations for scheduler
- map<string, string> annotations = 9;
+ map<string, string> annotations = 10;
// Add labels for scheduler
- map<string, string> labels = 10;
+ map<string, string> labels = 11;
}
// Get Scheduler operation request
@@ -161,10 +163,12 @@ message NewSchedulerVersionRequest { optional Autoscaling autoscaling = 7; // List of Scheduler forwarders repeated Forwarder forwarders = 8;
+ // PDB MaxUnavailable Specification
+ string pdb_max_unavailable = 9;
// New annotations for scheduler
- map<string, string> annotations = 9;
+ map<string, string> annotations = 10;
// New labels for scheduler
- map<string, string> labels = 10;
+ map<string, string> labels = 11;
}
// Update schedule operation response payload.
@@ -189,10 +193,12 @@ message PatchSchedulerRequest { optional OptionalAutoscaling autoscaling = 6; // List of Scheduler forwarders repeated Forwarder forwarders = 7;
+ // PDB MaxUnavailable Specification
+ string pdb_max_unavailable = 8;
// Annotations declaration for the scheduler
- map<string, string> annotations = 8;
+ map<string, string> annotations = 9;
// Labels declaration for the scheduler
- map<string, string> labels = 9;
+ map<string, string> labels = 10;
}
// PatchSchedulerResponse has the operation response id that represents the operation created for this change.
diff --git a/proto/apidocs.swagger.json b/proto/apidocs.swagger.json
index a4c112706..93e41eda2 100644
--- a/proto/apidocs.swagger.json
+++ b/proto/apidocs.swagger.json
@@ -503,6 +503,10 @@ }, "title": "List of Scheduler forwarders" },
+ "pdbMaxUnavailable": {
+ "type": "string",
+ "title": "PDB MaxUnavailable Specification"
+ },
"annotations": { "type": "object", "additionalProperties": {
@@ -587,6 +591,10 @@ }, "title": "List of Scheduler forwarders" },
+ "pdbMaxUnavailable": {
+ "type": "string",
+ "title": "PDB MaxUnavailable Specification"
+ },
"annotations": { "type": "object", "additionalProperties": {
@@ -1241,6 +1249,10 @@ }, "title": "List of Scheduler forwarders" },
+ "pdbMaxUnavailable": {
+ "type": "string",
+ "title": "PDB MaxUnavailable Specification"
+ },
"annotations": { "type": "object", "additionalProperties": {
@@ -1741,6 +1753,10 @@ }, "title": "List of Scheduler forwarders" },
+ "pdbMaxUnavailable": {
+ "type": "string",
+ "title": "PDB MaxUnavailable Specification"
+ },
"annotations": { "type": "object", "additionalProperties": {
diff --git a/test/data/handlers/fixtures/response/schedulers_handler/create_scheduler.json b/test/data/handlers/fixtures/response/schedulers_handler/create_scheduler.json
index 6d643d37f..e97c6e418 100644
--- a/test/data/handlers/fixtures/response/schedulers_handler/create_scheduler.json
+++ b/test/data/handlers/fixtures/response/schedulers_handler/create_scheduler.json
@@ -77,6 +77,7 @@ } } ],
+ "pdbMaxUnavailable": "10%",
"annotations": { "imageregistry": "https://docker.hub.com/" },
diff --git a/test/data/handlers/fixtures/response/schedulers_handler/get_scheduler.json b/test/data/handlers/fixtures/response/schedulers_handler/get_scheduler.json
index 191e557e0..9b5eef382 100644
--- a/test/data/handlers/fixtures/response/schedulers_handler/get_scheduler.json
+++ b/test/data/handlers/fixtures/response/schedulers_handler/get_scheduler.json
@@ -77,6 +77,7 @@ } } ],
+ "pdbMaxUnavailable": "10%",
"annotations": {}, "labels": {} }
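For reviewers who want to sanity-check the accepted formats, here is a minimal standalone Go sketch (illustrative only, not part of the patch; the function name, main function, and sample values are invented for the demo) that mirrors the semantics of IsPdbMaxUnavailableValid introduced above: an empty string is accepted and later replaced by the SchedulerManager default read from services.schedulerManager.defaultPdbMaxUnavailable (falling back to pdb.DefaultPdbMaxUnavailablePercentage when that value fails validation), a plain integer must be greater than zero, and a percentage must be strictly between 0 and 100.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// isPdbMaxUnavailableValid mirrors internal/core/validations.IsPdbMaxUnavailableValid
// from the patch above.
func isPdbMaxUnavailableValid(value string) bool {
	if value == "" {
		// Empty is valid: the SchedulerManager applies its configured default.
		return true
	}
	if strings.HasSuffix(value, "%") {
		// Percentage form: must be strictly between 0 and 100.
		p, err := strconv.Atoi(strings.TrimSuffix(value, "%"))
		return err == nil && p > 0 && p < 100
	}
	// Absolute form: must be an integer greater than zero.
	n, err := strconv.Atoi(value)
	return err == nil && n > 0
}

func main() {
	// Illustrative inputs only; "5%" matches the default used in the tests above.
	for _, v := range []string{"", "5%", "100%", "0%", "3", "0", "-1", "abc"} {
		fmt.Printf("%-6q valid=%v\n", v, isPdbMaxUnavailableValid(v))
	}
}

Values rejected by this sketch are the same ones the "pdb_max_unavailable" validator registered in internal/validations refuses at the API boundary for the new pdb_max_unavailable field.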