Skip to content

Commit

Permalink
K8SPSMDB-1205: Allow backups in unmanaged clusters (#1715)
Browse files Browse the repository at this point in the history
* K8SPSMDB-1205: Allow backups in unmanaged clusters

* don't allow restores in unmanaged clusters

---------

Co-authored-by: Viacheslav Sarzhan <slava.sarzhan@percona.com>
  • Loading branch information
egegunes and hors authored Nov 13, 2024
1 parent 5f9860e commit 40ec6d1
Show file tree
Hide file tree
Showing 6 changed files with 35 additions and 29 deletions.
20 changes: 11 additions & 9 deletions e2e-tests/cross-site-sharded/run
Original file line number Diff line number Diff line change
Expand Up @@ -16,19 +16,21 @@ replica_cluster="cross-site-sharded-replica"
# wait_for_members polls the replset configuration at the given endpoint until
# it reports the expected number of members, or fails after 15 attempts.
#   $1 - mongodb endpoint (host, without port)
#   $2 - replset name (used only in log messages)
#   $3 - expected member count
# NOTE(review): relies on a caller-scoped `retry` counter — TODO confirm it is
# reset before each call.
wait_for_members() {
	local endpoint="$1"
	local rsName="$2"
	local target_count=$3

	local nodes_count=0
	until [[ ${nodes_count} == ${target_count} ]]; do
		# Query the member count, stripping mongo shell noise and normalizing
		# ObjectIds / pod ordinals so the output is just the number.
		nodes_count=$(run_mongos 'rs.conf().members.length' "clusterAdmin:clusterAdmin123456@$endpoint" "mongodb" ":27017" \
			| egrep -v 'I NETWORK|W NETWORK|Error saving history file|Percona Server for MongoDB|connecting to:|Unable to reach primary for set|Implicit session:|versions do not match|Error saving history file:|bye' \
			| $sed -re 's/ObjectId\("[0-9a-f]+"\)//; s/-[0-9]+.svc/-xxx.svc/')

		echo -n "waiting for all members to be configured in ${rsName}"
		let retry+=1
		if [ $retry -ge 15 ]; then
			echo "Max retry count ${retry} reached. something went wrong with mongo cluster. Config for endpoint ${endpoint} has ${nodes_count} but expected ${target_count}."
			exit 1
		fi
		echo .
		sleep 10
	done
}
Expand Down Expand Up @@ -164,9 +166,9 @@ kubectl_bin patch psmdb ${main_cluster} --type=merge --patch '{
}
}'

wait_for_members $replica_cfg_0_endpoint cfg
wait_for_members $replica_rs0_0_endpoint rs0
wait_for_members $replica_rs1_0_endpoint rs1
wait_for_members $replica_cfg_0_endpoint cfg 6
wait_for_members $replica_rs0_0_endpoint rs0 6
wait_for_members $replica_rs1_0_endpoint rs1 6

kubectl_bin config set-context $(kubectl_bin config current-context) --namespace="$replica_namespace"

Expand Down
4 changes: 0 additions & 4 deletions pkg/apis/psmdb/v1/psmdb_defaults.go
Original file line number Diff line number Diff line change
Expand Up @@ -589,10 +589,6 @@ func (cr *PerconaServerMongoDB) CheckNSetDefaults(platform version.Platform, log
cr.Spec.ClusterServiceDNSMode = DNSModeInternal
}

if cr.Spec.Unmanaged && cr.Spec.Backup.Enabled {
return errors.New("backup.enabled must be false on unmanaged clusters")
}

if cr.Spec.Unmanaged && cr.Spec.UpdateStrategy == SmartUpdateStatefulSetStrategyType {
return errors.New("SmartUpdate is not allowed on unmanaged clusters, set updateStrategy to RollingUpdate or OnDelete")
}
Expand Down
18 changes: 13 additions & 5 deletions pkg/apis/psmdb/v1/psmdb_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -1158,11 +1158,8 @@ func (cr *PerconaServerMongoDB) MongosNamespacedName() types.NamespacedName {
}

func (cr *PerconaServerMongoDB) CanBackup(ctx context.Context) error {
logf.FromContext(ctx).V(1).Info("checking if backup is allowed", "backup", cr.Name)

if cr.Spec.Unmanaged {
return errors.Errorf("backups are not allowed on unmanaged clusters")
}
log := logf.FromContext(ctx).V(1).WithValues("cluster", cr.Name, "namespace", cr.Namespace)
log.Info("checking if backup is allowed")

if cr.Status.State == AppStateReady {
return nil
Expand All @@ -1185,6 +1182,17 @@ func (cr *PerconaServerMongoDB) CanBackup(ctx context.Context) error {
return nil
}

// CanRestore returns an error if a restore must not be started for this
// cluster. Restores are rejected on unmanaged clusters.
func (cr *PerconaServerMongoDB) CanRestore(ctx context.Context) error {
	logf.FromContext(ctx).V(1).
		WithValues("cluster", cr.Name, "namespace", cr.Namespace).
		Info("checking if restore is allowed")

	if !cr.Spec.Unmanaged {
		return nil
	}

	return errors.New("can't run restore in an unmanaged cluster")
}

const maxStatusesQuantity = 20

func (s *PerconaServerMongoDBStatus) AddCondition(c ClusterCondition) {
Expand Down
6 changes: 1 addition & 5 deletions pkg/controller/perconaservermongodbbackup/backup.go
Original file line number Diff line number Diff line change
Expand Up @@ -199,11 +199,7 @@ func (b *Backup) Status(ctx context.Context, cr *api.PerconaServerMongoDBBackup)
// backupPods maps each replset name to the node that performed its backup.
// The node value is kept verbatim as PBM reports it (no hostname truncation).
func backupPods(replsets []pbmBackup.BackupReplset) map[string]string {
	// Pre-size the map: one entry per replset.
	pods := make(map[string]string, len(replsets))
	for _, rs := range replsets {
		pods[rs.Name] = rs.Node
	}
	return pods
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -153,17 +153,21 @@ func (r *ReconcilePerconaServerMongoDBRestore) Reconcile(ctx context.Context, re
return reconcile.Result{}, nil
}

bcp, err := r.getBackup(ctx, cr)
if err != nil {
return rr, errors.Wrap(err, "get backup")
}

cluster := new(psmdbv1.PerconaServerMongoDB)
err = r.client.Get(ctx, types.NamespacedName{Name: cr.Spec.ClusterName, Namespace: cr.Namespace}, cluster)
if err != nil {
return rr, errors.Wrapf(err, "get cluster %s/%s", cr.Namespace, cr.Spec.ClusterName)
}

if err = cluster.CanRestore(ctx); err != nil {
return reconcile.Result{}, errors.Wrap(err, "can cluster restore")
}

bcp, err := r.getBackup(ctx, cr)
if err != nil {
return rr, errors.Wrap(err, "get backup")
}

var svr *version.ServerVersion
svr, err = version.Server(r.clientcmd)
if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion pkg/psmdb/backup/pbm.go
Original file line number Diff line number Diff line change
Expand Up @@ -639,7 +639,7 @@ func (b *pbmC) Node(ctx context.Context) (string, error) {
return "", err
}

return strings.Split(lock.Node, ".")[0], nil
return lock.Node, nil
}

func (b *pbmC) GetStorage(ctx context.Context, e pbmLog.LogEvent) (storage.Storage, error) {
Expand Down

0 comments on commit 40ec6d1

Please sign in to comment.