Fix image pull policy in the pod spec
reinaldooli committed Nov 14, 2023
1 parent 444e4c1 commit 9c80406
Showing 5 changed files with 37 additions and 20 deletions.
11 changes: 6 additions & 5 deletions internal/adapters/runtime/kubernetes/game_room_convert.go
@@ -102,11 +102,12 @@ func convertGameRoomSpec(scheduler entities.Scheduler, gameRoomName string, game

 func convertContainer(container game_room.Container, schedulerID, roomID string) (v1.Container, error) {
 	podContainer := v1.Container{
-		Name:    container.Name,
-		Image:   container.Image,
-		Command: container.Command,
-		Ports:   []v1.ContainerPort{},
-		Env:     []v1.EnvVar{},
+		Name:            container.Name,
+		Image:           container.Image,
+		ImagePullPolicy: v1.PullPolicy(container.ImagePullPolicy),
+		Command:         container.Command,
+		Ports:           []v1.ContainerPort{},
+		Env:             []v1.EnvVar{},
 	}

 	for _, port := range container.Ports {
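The cast above makes v1.PullPolicy (the Kubernetes core/v1 string type) accept whatever string the scheduler carries. A minimal sketch, not part of this commit, of how the value could be validated before conversion; the validatePullPolicy helper is hypothetical:

package kubernetes

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// validatePullPolicy accepts only the three pull policies Kubernetes defines.
// An empty string passes through so the cluster default (Always for :latest,
// IfNotPresent otherwise) still applies.
func validatePullPolicy(raw string) (v1.PullPolicy, error) {
	switch policy := v1.PullPolicy(raw); policy {
	case v1.PullAlways, v1.PullIfNotPresent, v1.PullNever:
		return policy, nil
	case "":
		return "", nil
	default:
		return "", fmt.Errorf("unsupported imagePullPolicy %q", raw)
	}
}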
3 changes: 2 additions & 1 deletion internal/api/handlers/requestadapters/schedulers.go
@@ -532,7 +532,8 @@ func getRoomOccupancy(roomOccupancyParameters *autoscaling.RoomOccupancyParams)
 		return nil
 	}
 	return &api.RoomOccupancy{
-		ReadyTarget: float32(roomOccupancyParameters.ReadyTarget),
+		ReadyTarget:   float32(roomOccupancyParameters.ReadyTarget),
+		DownThreshold: float32(roomOccupancyParameters.DownThreshold),
 	}
 }
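A short usage sketch of the adapter after this change, with made-up parameter values; the float64-to-float32 narrowing matches the casts above:

params := &autoscaling.RoomOccupancyParams{
	ReadyTarget:   0.5,
	DownThreshold: 0.7,
}
occupancy := getRoomOccupancy(params)
// occupancy.ReadyTarget == 0.5 and occupancy.DownThreshold == 0.7,
// both narrowed to float32 for the API response.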
14 changes: 8 additions & 6 deletions internal/core/entities/scheduler_info.go
@@ -25,9 +25,10 @@ package entities

 import "github.com/topfreegames/maestro/internal/core/entities/autoscaling"

 type AutoscalingInfo struct {
-	Enabled bool
-	Min     int
-	Max     int
+	Enabled  bool
+	Min      int
+	Max      int
+	Cooldown int
 }

 type SchedulerInfo struct {
@@ -101,9 +102,10 @@ func WithAutoscalingInfo(schedulerAutoscaling *autoscaling.Autoscaling) Schedule
 	return func(schedulerInfo *SchedulerInfo) {
 		if schedulerAutoscaling != nil {
 			autoscalingInfo := &AutoscalingInfo{
-				Enabled: schedulerAutoscaling.Enabled,
-				Min:     schedulerAutoscaling.Min,
-				Max:     schedulerAutoscaling.Max,
+				Enabled:  schedulerAutoscaling.Enabled,
+				Min:      schedulerAutoscaling.Min,
+				Max:      schedulerAutoscaling.Max,
+				Cooldown: schedulerAutoscaling.Cooldown,
 			}
 			schedulerInfo.Autoscaling = autoscalingInfo
 		}
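Because WithAutoscalingInfo returns a func(*SchedulerInfo) option, the new field can be exercised directly. A minimal sketch with made-up values:

info := &SchedulerInfo{}
WithAutoscalingInfo(&autoscaling.Autoscaling{
	Enabled:  true,
	Min:      2,
	Max:      10,
	Cooldown: 300, // seconds; the executor converts this with time.Duration(cooldown) * time.Second
})(info)
// info.Autoscaling now carries Cooldown alongside Enabled, Min, and Max.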
23 changes: 18 additions & 5 deletions internal/core/operations/healthcontroller/executor.go
@@ -174,7 +174,8 @@ func (ex *Executor) ensureDesiredAmountOfInstances(ctx context.Context, op *oper

 	switch {
 	case actualAmount > desiredAmount: // Need to scale down
-		if ex.canPerformDownscale(ctx, scheduler, logger) {
+		can, msg := ex.canPerformDownscale(ctx, scheduler, logger)
+		if can {
 			scheduler.LastDownscaleAt = time.Now().UTC()
 			if err := ex.schedulerStorage.UpdateScheduler(ctx, scheduler); err != nil {
 				logger.Error("error updating scheduler", zap.Error(err))
@@ -191,7 +192,7 @@
 			msgToAppend = fmt.Sprintf("created operation (id: %s) to remove %v rooms.", removeOperation.ID, removeAmount)
 		} else {
 			tookAction = false
-			msgToAppend = "cannot scale down because cool down period has not passed yet"
+			msgToAppend = msg
 		}
 	case actualAmount < desiredAmount: // Need to scale up
 		addAmount := desiredAmount - actualAmount
@@ -329,11 +330,11 @@ func (ex *Executor) setTookAction(def *Definition, tookAction bool) {
 	def.TookAction = &tookAction
 }

-func (ex *Executor) canPerformDownscale(ctx context.Context, scheduler *entities.Scheduler, logger *zap.Logger) bool {
+func (ex *Executor) canPerformDownscale(ctx context.Context, scheduler *entities.Scheduler, logger *zap.Logger) (bool, string) {
 	can, err := ex.autoscaler.CanDownscale(ctx, scheduler)
 	if err != nil {
 		logger.Error("error checking if scheduler can downscale", zap.Error(err))
-		return can
+		return can, err.Error()
 	}

 	cooldown := 0
@@ -343,5 +344,17 @@ func (ex *Executor) canPerformDownscale(ctx context.Context, scheduler *entities
 	cooldownDuration := time.Duration(cooldown) * time.Second
 	waitingCooldown := scheduler.LastDownscaleAt.Add(cooldownDuration).After(time.Now().UTC())

-	return can && !waitingCooldown
+	if !can {
+		message := fmt.Sprintf("scheduler %s can't downscale, occupation is above the threshold", scheduler.Name)
+		logger.Info(message)
+		return false, message
+	}
+
+	if can && waitingCooldown {
+		message := fmt.Sprintf("scheduler %s can downscale, but cooldown period has not passed yet", scheduler.Name)
+		logger.Info(message)
+		return false, message
+	}
+
+	return can && !waitingCooldown, "ok"
 }
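The cooldown gate itself is a single time comparison. A standalone, runnable sketch of the check above, using assumed values:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assume the last downscale ran 4 minutes ago and the scheduler's
	// autoscaling policy sets a 300-second (5-minute) cooldown.
	lastDownscaleAt := time.Now().UTC().Add(-4 * time.Minute)
	cooldown := 300

	cooldownDuration := time.Duration(cooldown) * time.Second
	waitingCooldown := lastDownscaleAt.Add(cooldownDuration).After(time.Now().UTC())

	// true: one minute of cooldown remains, so the downscale is blocked.
	fmt.Println(waitingCooldown)
}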
@@ -309,7 +309,7 @@ func TestCanDownscale(t *testing.T) {
 		assert.Truef(t, allow, "downscale should be allowed")
 	})

-	t.Run("it is expected to allow downscale when occupation is equal the threshold", func(t *testing.T) {
+	t.Run("it is expected to allow downscale when occupation below the threshold", func(t *testing.T) {
 		readyTarget := float64(0.5)
 		downThreshold := float64(0.6)
 		occupiedRooms := 60
@@ -419,7 +419,7 @@
 		_, err := policy.CanDownscale(policyParams, schedulerState)
 		assert.EqualError(t, err, "Downscale threshold must be between 0 and 1")
 	})
-	t.Run("when ready target is 0", func(t *testing.T) {
+	t.Run("when down threshold is 0", func(t *testing.T) {
 		downThreshold := float64(0.0)
 		occupiedRooms := 10
 		readyRooms := 10
@@ -438,7 +438,7 @@
 		_, err := policy.CanDownscale(policyParams, schedulerState)
 		assert.EqualError(t, err, "Downscale threshold must be between 0 and 1")
 	})
-	t.Run("when ready target is lower than 0", func(t *testing.T) {
+	t.Run("when down threshold is lower than 0", func(t *testing.T) {
 		downThreshold := float64(-0.1)
 		occupiedRooms := 10
 		readyRooms := 10
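Taken together, the renamed cases suggest the policy permits a downscale only while the occupancy ratio sits below the down threshold, and rejects thresholds outside (0, 1). An illustrative stand-in for that check, not the actual implementation (the function name, signature, and zero-rooms guard are assumptions):

package main

import (
	"errors"
	"fmt"
)

func canDownscale(occupiedRooms, readyRooms int, downThreshold float64) (bool, error) {
	if downThreshold <= 0 || downThreshold >= 1 {
		return false, errors.New("Downscale threshold must be between 0 and 1")
	}
	total := occupiedRooms + readyRooms
	if total == 0 {
		return false, nil // no rooms: nothing to scale down
	}
	occupancy := float64(occupiedRooms) / float64(total)
	return occupancy < downThreshold, nil
}

func main() {
	allow, err := canDownscale(60, 60, 0.6) // occupancy 0.5, below the 0.6 threshold
	fmt.Println(allow, err)                 // true <nil>
}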
