From 68408036e45cc6e2d8a1e88b0ff091ee63c2969d Mon Sep 17 00:00:00 2001
From: "Eduardo J. Ortega U" <5791035+ejortegau@users.noreply.github.com>
Date: Tue, 5 Nov 2024 10:15:54 +0100
Subject: [PATCH] Address PR comments
Signed-off-by: Eduardo J. Ortega U <5791035+ejortegau@users.noreply.github.com>
---
changelog/22.0/22.0.0/summary.md | 11 +--
.../reparentutil/emergency_reparenter.go | 5 +-
.../reparentutil/emergency_reparenter_test.go | 2 +-
go/vt/vtctl/reparentutil/reparent_sorter.go | 10 +--
.../reparentutil/reparent_sorter_test.go | 90 +++++++++----------
go/vt/vtctl/reparentutil/util.go | 6 +-
go/vt/vtctl/reparentutil/util_test.go | 10 +--
go/vt/vttablet/grpctmserver/server.go | 4 +-
go/vt/vttablet/tabletmanager/rpc_agent.go | 2 +-
go/vt/vttablet/tabletmanager/rpc_backup.go | 2 +-
go/vt/vttablet/tmrpctest/test_tm_rpc.go | 2 +-
proto/replicationdata.proto | 4 +-
proto/tabletmanagerdata.proto | 4 +-
13 files changed, 77 insertions(+), 75 deletions(-)
diff --git a/changelog/22.0/22.0.0/summary.md b/changelog/22.0/22.0.0/summary.md
index a5f0885e77a..ad146c8c746 100644
--- a/changelog/22.0/22.0.0/summary.md
+++ b/changelog/22.0/22.0.0/summary.md
@@ -4,14 +4,15 @@
- **[Known Issues](#known-issues)**
- **[Major Changes](#major-changes)**
- - **[Support for specifying expected primary in reparents](#reparents-prefer-not-backing-up)**
+ - **[Prefer not promoting a replica that is currently taking a backup](#reparents-prefer-not-backing-up)**
## Known Issues
## Major Changes
-### Support specifying expected primary in reparents
+### Prefer not promoting a replica that is currently taking a backup
-The execution of both Planned Shard Reparents and Emergency Reparents now prefer promoting replicas that are not
-currently taking backups. Notice that if there's only one suitable replica to promote, and it is taking a backup, it
-will still be promoted.
\ No newline at end of file
+Both planned and emergency failovers now prefer promoting replicas that are not
+currently taking backups. Note that if the only suitable replica to promote is taking a backup, it
+will still be promoted — unless it is taking a `builtin` backup, in which case it
+will never be promoted.
\ No newline at end of file
diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter.go b/go/vt/vtctl/reparentutil/emergency_reparenter.go
index 3b91d452c93..600832ca5d2 100644
--- a/go/vt/vtctl/reparentutil/emergency_reparenter.go
+++ b/go/vt/vtctl/reparentutil/emergency_reparenter.go
@@ -422,10 +422,11 @@ func (erp *EmergencyReparenter) findMostAdvanced(
// We have already removed the tablets with errant GTIDs before calling this function. At this point our winning position must be a
// superset of all the other valid positions - unless the tablet with that position is taking a backup, in which case it might
- // have a larger GTID set but we are still not choosing it. If that is not the case, then we have a split brain scenario, and
+ // have a larger GTID set but we are still not choosing it. This can lead to split brain, so
// we should cancel the ERS
for i, position := range tabletPositions {
- if !winningPosition.AtLeast(position) && !backingUpTablets[topoproto.TabletAliasString(validTablets[i].Alias)] {
+ runningBackUp, ok := backingUpTablets[topoproto.TabletAliasString(validTablets[i].Alias)]
+ if !winningPosition.AtLeast(position) && ok && !runningBackUp {
return nil, nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "split brain detected between servers - %v and %v", winningPrimaryTablet.Alias, validTablets[i].Alias)
}
}
diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go
index 2ba8bfa107f..18c1d078b3e 100644
--- a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go
+++ b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go
@@ -2868,7 +2868,7 @@ func TestEmergencyReparenter_findMostAdvanced(t *testing.T) {
},
},
}, {
- name: "choose most advanced not backing up",
+ name: "choose most advanced not running backup",
validCandidates: map[string]replication.Position{
"zone1-0000000100": positionMostAdvanced,
"zone1-0000000101": positionIntermediate1,
diff --git a/go/vt/vtctl/reparentutil/reparent_sorter.go b/go/vt/vtctl/reparentutil/reparent_sorter.go
index abfcf4d5498..ff66b03ab74 100644
--- a/go/vt/vtctl/reparentutil/reparent_sorter.go
+++ b/go/vt/vtctl/reparentutil/reparent_sorter.go
@@ -33,7 +33,7 @@ type reparentSorter struct {
tablets []*topodatapb.Tablet
positions []replication.Position
innodbBufferPool []int
- backingUp map[string]bool
+ backupRunning map[string]bool
durability Durabler
}
@@ -44,7 +44,7 @@ func newReparentSorter(tablets []*topodatapb.Tablet, positions []replication.Pos
positions: positions,
durability: durability,
innodbBufferPool: innodbBufferPool,
- backingUp: backingUp,
+ backupRunning: backingUp,
}
}
@@ -74,11 +74,11 @@ func (rs *reparentSorter) Less(i, j int) bool {
return true
}
- // We want tablets backing up always at the end of the list, so that's the first thing we check
- if !rs.backingUp[topoproto.TabletAliasString(rs.tablets[i].Alias)] && rs.backingUp[topoproto.TabletAliasString(rs.tablets[j].Alias)] {
+ // We want tablets which are currently running a backup to always be at the end of the list, so that's the first thing we check
+ if !rs.backupRunning[topoproto.TabletAliasString(rs.tablets[i].Alias)] && rs.backupRunning[topoproto.TabletAliasString(rs.tablets[j].Alias)] {
return true
}
- if rs.backingUp[topoproto.TabletAliasString(rs.tablets[i].Alias)] && !rs.backingUp[topoproto.TabletAliasString(rs.tablets[j].Alias)] {
+ if rs.backupRunning[topoproto.TabletAliasString(rs.tablets[i].Alias)] && !rs.backupRunning[topoproto.TabletAliasString(rs.tablets[j].Alias)] {
return false
}
diff --git a/go/vt/vtctl/reparentutil/reparent_sorter_test.go b/go/vt/vtctl/reparentutil/reparent_sorter_test.go
index 18f633ce066..d2cca9e7de7 100644
--- a/go/vt/vtctl/reparentutil/reparent_sorter_test.go
+++ b/go/vt/vtctl/reparentutil/reparent_sorter_test.go
@@ -90,63 +90,63 @@ func TestReparentSorter(t *testing.T) {
positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID2)
testcases := []struct {
- name string
- tablets []*topodatapb.Tablet
- innodbBufferPool []int
- positions []replication.Position
- backingUpTablets map[string]bool
- containsErr string
- sortedTablets []*topodatapb.Tablet
+ name string
+ tablets []*topodatapb.Tablet
+ innodbBufferPool []int
+ positions []replication.Position
+ tabletBackupStatus map[string]bool
+ containsErr string
+ sortedTablets []*topodatapb.Tablet
}{
{
- name: "all advanced, sort via promotion rules",
- tablets: []*topodatapb.Tablet{nil, tabletReplica1_100, tabletRdonly1_102},
- backingUpTablets: map[string]bool{topoproto.TabletAliasString(tabletReplica1_100.Alias): false, topoproto.TabletAliasString(tabletRdonly1_102.Alias): false},
- positions: []replication.Position{positionMostAdvanced, positionMostAdvanced, positionMostAdvanced},
- sortedTablets: []*topodatapb.Tablet{tabletReplica1_100, tabletRdonly1_102, nil},
+ name: "all advanced, sort via promotion rules",
+ tablets: []*topodatapb.Tablet{nil, tabletReplica1_100, tabletRdonly1_102},
+ tabletBackupStatus: map[string]bool{topoproto.TabletAliasString(tabletReplica1_100.Alias): false, topoproto.TabletAliasString(tabletRdonly1_102.Alias): false},
+ positions: []replication.Position{positionMostAdvanced, positionMostAdvanced, positionMostAdvanced},
+ sortedTablets: []*topodatapb.Tablet{tabletReplica1_100, tabletRdonly1_102, nil},
}, {
- name: "all advanced, sort via innodb buffer pool",
- tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletReplica2_100, tabletReplica1_100},
- backingUpTablets: map[string]bool{topoproto.TabletAliasString(tabletReplica1_101.Alias): false, topoproto.TabletAliasString(tabletReplica2_100.Alias): false, topoproto.TabletAliasString(tabletReplica1_100.Alias): false},
- positions: []replication.Position{positionMostAdvanced, positionMostAdvanced, positionMostAdvanced},
- innodbBufferPool: []int{10, 40, 25},
- sortedTablets: []*topodatapb.Tablet{tabletReplica2_100, tabletReplica1_100, tabletReplica1_101},
+ name: "all advanced, sort via innodb buffer pool",
+ tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletReplica2_100, tabletReplica1_100},
+ tabletBackupStatus: map[string]bool{topoproto.TabletAliasString(tabletReplica1_101.Alias): false, topoproto.TabletAliasString(tabletReplica2_100.Alias): false, topoproto.TabletAliasString(tabletReplica1_100.Alias): false},
+ positions: []replication.Position{positionMostAdvanced, positionMostAdvanced, positionMostAdvanced},
+ innodbBufferPool: []int{10, 40, 25},
+ sortedTablets: []*topodatapb.Tablet{tabletReplica2_100, tabletReplica1_100, tabletReplica1_101},
}, {
- name: "ordering by position",
- tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletReplica2_100, tabletReplica1_100, tabletRdonly1_102},
- backingUpTablets: map[string]bool{topoproto.TabletAliasString(tabletReplica1_101.Alias): false, topoproto.TabletAliasString(tabletReplica2_100.Alias): false, topoproto.TabletAliasString(tabletReplica1_100.Alias): false, topoproto.TabletAliasString(tabletRdonly1_102.Alias): false},
- positions: []replication.Position{positionEmpty, positionIntermediate1, positionIntermediate2, positionMostAdvanced},
- sortedTablets: []*topodatapb.Tablet{tabletRdonly1_102, tabletReplica1_100, tabletReplica2_100, tabletReplica1_101},
+ name: "ordering by position",
+ tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletReplica2_100, tabletReplica1_100, tabletRdonly1_102},
+ tabletBackupStatus: map[string]bool{topoproto.TabletAliasString(tabletReplica1_101.Alias): false, topoproto.TabletAliasString(tabletReplica2_100.Alias): false, topoproto.TabletAliasString(tabletReplica1_100.Alias): false, topoproto.TabletAliasString(tabletRdonly1_102.Alias): false},
+ positions: []replication.Position{positionEmpty, positionIntermediate1, positionIntermediate2, positionMostAdvanced},
+ sortedTablets: []*topodatapb.Tablet{tabletRdonly1_102, tabletReplica1_100, tabletReplica2_100, tabletReplica1_101},
}, {
name: "tablets and positions count error",
tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletReplica2_100},
positions: []replication.Position{positionEmpty, positionIntermediate1, positionMostAdvanced},
containsErr: "unequal number of tablets and positions",
}, {
- name: "promotion rule check",
- tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletRdonly1_102},
- backingUpTablets: map[string]bool{topoproto.TabletAliasString(tabletReplica1_101.Alias): false, topoproto.TabletAliasString(tabletRdonly1_102.Alias): false},
- positions: []replication.Position{positionMostAdvanced, positionMostAdvanced},
- sortedTablets: []*topodatapb.Tablet{tabletReplica1_101, tabletRdonly1_102},
+ name: "promotion rule check",
+ tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletRdonly1_102},
+ tabletBackupStatus: map[string]bool{topoproto.TabletAliasString(tabletReplica1_101.Alias): false, topoproto.TabletAliasString(tabletRdonly1_102.Alias): false},
+ positions: []replication.Position{positionMostAdvanced, positionMostAdvanced},
+ sortedTablets: []*topodatapb.Tablet{tabletReplica1_101, tabletRdonly1_102},
}, {
- name: "mixed",
- tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletReplica2_100, tabletReplica1_100, tabletRdonly1_102},
- backingUpTablets: map[string]bool{topoproto.TabletAliasString(tabletReplica1_101.Alias): false, topoproto.TabletAliasString(tabletReplica2_100.Alias): false, topoproto.TabletAliasString(tabletReplica1_100.Alias): false, topoproto.TabletAliasString(tabletRdonly1_102.Alias): false},
- positions: []replication.Position{positionEmpty, positionIntermediate1, positionMostAdvanced, positionIntermediate1},
- sortedTablets: []*topodatapb.Tablet{tabletReplica1_100, tabletReplica2_100, tabletRdonly1_102, tabletReplica1_101},
+ name: "mixed",
+ tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletReplica2_100, tabletReplica1_100, tabletRdonly1_102},
+ tabletBackupStatus: map[string]bool{topoproto.TabletAliasString(tabletReplica1_101.Alias): false, topoproto.TabletAliasString(tabletReplica2_100.Alias): false, topoproto.TabletAliasString(tabletReplica1_100.Alias): false, topoproto.TabletAliasString(tabletRdonly1_102.Alias): false},
+ positions: []replication.Position{positionEmpty, positionIntermediate1, positionMostAdvanced, positionIntermediate1},
+ sortedTablets: []*topodatapb.Tablet{tabletReplica1_100, tabletReplica2_100, tabletRdonly1_102, tabletReplica1_101},
}, {
- name: "mixed - another",
- tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletReplica2_100, tabletReplica1_100, tabletRdonly1_102},
- backingUpTablets: map[string]bool{topoproto.TabletAliasString(tabletReplica1_101.Alias): false, topoproto.TabletAliasString(tabletReplica2_100.Alias): false, topoproto.TabletAliasString(tabletReplica1_100.Alias): false, topoproto.TabletAliasString(tabletRdonly1_102.Alias): false},
- positions: []replication.Position{positionIntermediate1, positionIntermediate1, positionMostAdvanced, positionIntermediate1},
- innodbBufferPool: []int{100, 200, 0, 200},
- sortedTablets: []*topodatapb.Tablet{tabletReplica1_100, tabletReplica2_100, tabletReplica1_101, tabletRdonly1_102},
+ name: "mixed - another",
+ tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletReplica2_100, tabletReplica1_100, tabletRdonly1_102},
+ tabletBackupStatus: map[string]bool{topoproto.TabletAliasString(tabletReplica1_101.Alias): false, topoproto.TabletAliasString(tabletReplica2_100.Alias): false, topoproto.TabletAliasString(tabletReplica1_100.Alias): false, topoproto.TabletAliasString(tabletRdonly1_102.Alias): false},
+ positions: []replication.Position{positionIntermediate1, positionIntermediate1, positionMostAdvanced, positionIntermediate1},
+ innodbBufferPool: []int{100, 200, 0, 200},
+ sortedTablets: []*topodatapb.Tablet{tabletReplica1_100, tabletReplica2_100, tabletReplica1_101, tabletRdonly1_102},
}, {
- name: "all advanced, sort via backup flag",
- tablets: []*topodatapb.Tablet{nil, tabletReplica1_100, tabletRdonly1_102},
- backingUpTablets: map[string]bool{topoproto.TabletAliasString(tabletReplica1_100.Alias): true, topoproto.TabletAliasString(tabletRdonly1_102.Alias): false},
- positions: []replication.Position{positionMostAdvanced, positionMostAdvanced, positionMostAdvanced},
- sortedTablets: []*topodatapb.Tablet{tabletRdonly1_102, tabletReplica1_100, nil},
+ name: "all advanced, sort via backup flag",
+ tablets: []*topodatapb.Tablet{nil, tabletReplica1_100, tabletRdonly1_102},
+ tabletBackupStatus: map[string]bool{topoproto.TabletAliasString(tabletReplica1_100.Alias): true, topoproto.TabletAliasString(tabletRdonly1_102.Alias): false},
+ positions: []replication.Position{positionMostAdvanced, positionMostAdvanced, positionMostAdvanced},
+ sortedTablets: []*topodatapb.Tablet{tabletRdonly1_102, tabletReplica1_100, nil},
},
}
@@ -154,7 +154,7 @@ func TestReparentSorter(t *testing.T) {
require.NoError(t, err)
for _, testcase := range testcases {
t.Run(testcase.name, func(t *testing.T) {
- err := sortTabletsForReparent(testcase.tablets, testcase.positions, testcase.innodbBufferPool, testcase.backingUpTablets, durability)
+ err := sortTabletsForReparent(testcase.tablets, testcase.positions, testcase.innodbBufferPool, testcase.tabletBackupStatus, durability)
if testcase.containsErr != "" {
require.EqualError(t, err, testcase.containsErr)
} else {
diff --git a/go/vt/vtctl/reparentutil/util.go b/go/vt/vtctl/reparentutil/util.go
index fc2e9f756fd..ad2331e7942 100644
--- a/go/vt/vtctl/reparentutil/util.go
+++ b/go/vt/vtctl/reparentutil/util.go
@@ -127,7 +127,7 @@ func ElectNewPrimary(
tb := tablet
errorGroup.Go(func() error {
// find and store the positions for the tablet
- pos, replLag, backingUp, err := findPositionLagBackingUpForTablet(groupCtx, tb, logger, tmc, opts.WaitReplicasTimeout)
+ pos, replLag, backingUp, err := findTabletPositionLagBackupStatus(groupCtx, tb, logger, tmc, opts.WaitReplicasTimeout)
mu.Lock()
defer mu.Unlock()
if err == nil && (opts.TolerableReplLag == 0 || opts.TolerableReplLag >= replLag) {
@@ -161,9 +161,9 @@ func ElectNewPrimary(
return validTablets[0].Alias, nil
}
-// findPositionLagBackingUpForTablet processes the replication position and lag for a single tablet and
+// findTabletPositionLagBackupStatus processes the replication position and lag for a single tablet and
// returns it. It is safe to call from multiple goroutines.
-func findPositionLagBackingUpForTablet(ctx context.Context, tablet *topodatapb.Tablet, logger logutil.Logger, tmc tmclient.TabletManagerClient, waitTimeout time.Duration) (replication.Position, time.Duration, bool, error) {
+func findTabletPositionLagBackupStatus(ctx context.Context, tablet *topodatapb.Tablet, logger logutil.Logger, tmc tmclient.TabletManagerClient, waitTimeout time.Duration) (replication.Position, time.Duration, bool, error) {
logger.Infof("getting replication position from %v", topoproto.TabletAliasString(tablet.Alias))
ctx, cancel := context.WithTimeout(ctx, waitTimeout)
diff --git a/go/vt/vtctl/reparentutil/util_test.go b/go/vt/vtctl/reparentutil/util_test.go
index 734aeef4986..c7c10601b79 100644
--- a/go/vt/vtctl/reparentutil/util_test.go
+++ b/go/vt/vtctl/reparentutil/util_test.go
@@ -140,7 +140,7 @@ func TestElectNewPrimary(t *testing.T) {
errContains: nil,
},
{
- name: "Two good replicas, but one of them backing up so we pick the other one",
+ name: "Two good replicas, but one of them is taking a backup so we pick the other one",
tmc: &chooseNewPrimaryTestTMClient{
// both zone1-101 and zone1-102 are equivalent from a replication PoV, but zone1-102 is taking a backup
replicationStatuses: map[string]*replicationdatapb.Status{
@@ -201,7 +201,7 @@ func TestElectNewPrimary(t *testing.T) {
errContains: nil,
},
{
- name: "Only one replica, but it's backing up. We still use it.",
+ name: "Only one replica, but it's taking a backup. We still use it.",
tmc: &chooseNewPrimaryTestTMClient{
// both zone1-101 and zone1-102 are equivalent from a replication PoV, but zone1-102 is taking a backup
replicationStatuses: map[string]*replicationdatapb.Status{
@@ -524,7 +524,7 @@ func TestElectNewPrimary(t *testing.T) {
errContains: nil,
},
{
- name: "Two replicas, first one with too much lag, another one backing up - elect the one taking backup",
+ name: "Two replicas, first one with too much lag, another one taking a backup - elect the one taking backup",
tmc: &chooseNewPrimaryTestTMClient{
// zone1-101 is behind zone1-102
replicationStatuses: map[string]*replicationdatapb.Status{
@@ -1083,7 +1083,7 @@ func TestFindPositionForTablet(t *testing.T) {
expectedLag: 201 * time.Second,
expectedPosition: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5",
}, {
- name: "Host is backing up",
+ name: "Host is taking a backup",
tmc: &testutil.TabletManagerClient{
ReplicationStatusResults: map[string]struct {
Position *replicationdatapb.Status
@@ -1177,7 +1177,7 @@ func TestFindPositionForTablet(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
- pos, lag, backingUp, err := findPositionLagBackingUpForTablet(ctx, test.tablet, logger, test.tmc, 10*time.Second)
+ pos, lag, backingUp, err := findTabletPositionLagBackupStatus(ctx, test.tablet, logger, test.tmc, 10*time.Second)
if test.expectedErr != "" {
require.EqualError(t, err, test.expectedErr)
return
diff --git a/go/vt/vttablet/grpctmserver/server.go b/go/vt/vttablet/grpctmserver/server.go
index a2acc00d000..986be6c6532 100644
--- a/go/vt/vttablet/grpctmserver/server.go
+++ b/go/vt/vttablet/grpctmserver/server.go
@@ -342,7 +342,7 @@ func (s *server) MysqlHostMetrics(ctx context.Context, request *tabletmanagerdat
func (s *server) ReplicationStatus(ctx context.Context, request *tabletmanagerdatapb.ReplicationStatusRequest) (response *tabletmanagerdatapb.ReplicationStatusResponse, err error) {
defer s.tm.HandleRPCPanic(ctx, "ReplicationStatus", request, response, false /*verbose*/, &err)
ctx = callinfo.GRPCCallInfo(ctx)
- response = &tabletmanagerdatapb.ReplicationStatusResponse{BackingUp: s.tm.IsBackingUp()}
+ response = &tabletmanagerdatapb.ReplicationStatusResponse{BackingUp: s.tm.IsBackupRunning()}
status, err := s.tm.ReplicationStatus(ctx)
if err == nil {
response.Status = status
@@ -626,7 +626,7 @@ func (s *server) StopReplicationAndGetStatus(ctx context.Context, request *table
response.Status = statusResponse.Status
}
- response.BackingUp = s.tm.IsBackingUp()
+ response.BackingUp = s.tm.IsBackupRunning()
return response, err
}
diff --git a/go/vt/vttablet/tabletmanager/rpc_agent.go b/go/vt/vttablet/tabletmanager/rpc_agent.go
index d892bb705e0..f44f1cba01b 100644
--- a/go/vt/vttablet/tabletmanager/rpc_agent.go
+++ b/go/vt/vttablet/tabletmanager/rpc_agent.go
@@ -164,7 +164,7 @@ type RPCTM interface {
RestoreFromBackup(ctx context.Context, logger logutil.Logger, request *tabletmanagerdatapb.RestoreFromBackupRequest) error
- IsBackingUp() bool
+ IsBackupRunning() bool
// HandleRPCPanic is to be called in a defer statement in each
// RPC input point.
diff --git a/go/vt/vttablet/tabletmanager/rpc_backup.go b/go/vt/vttablet/tabletmanager/rpc_backup.go
index 411f2435f43..22fe72716dd 100644
--- a/go/vt/vttablet/tabletmanager/rpc_backup.go
+++ b/go/vt/vttablet/tabletmanager/rpc_backup.go
@@ -205,7 +205,7 @@ func (tm *TabletManager) RestoreFromBackup(ctx context.Context, logger logutil.L
return err
}
-func (tm *TabletManager) IsBackingUp() bool {
+func (tm *TabletManager) IsBackupRunning() bool {
return tm._isBackupRunning
}
diff --git a/go/vt/vttablet/tmrpctest/test_tm_rpc.go b/go/vt/vttablet/tmrpctest/test_tm_rpc.go
index 803e4ff2be4..9d5a12a0545 100644
--- a/go/vt/vttablet/tmrpctest/test_tm_rpc.go
+++ b/go/vt/vttablet/tmrpctest/test_tm_rpc.go
@@ -1411,7 +1411,7 @@ func (fra *fakeRPCTM) Backup(ctx context.Context, logger logutil.Logger, request
return nil
}
-func (fra *fakeRPCTM) IsBackingUp() bool {
+func (fra *fakeRPCTM) IsBackupRunning() bool {
return false
}
diff --git a/proto/replicationdata.proto b/proto/replicationdata.proto
index 1a54ca2ef27..76ca3e02103 100644
--- a/proto/replicationdata.proto
+++ b/proto/replicationdata.proto
@@ -50,7 +50,7 @@ message Status {
bool has_replication_filters = 22;
bool ssl_allowed = 23;
bool replication_lag_unknown = 24;
- bool backingUp = 25;
+ bool backup_running = 25;
}
// Configuration holds replication configuration information gathered from performance_schema and global variables.
@@ -66,7 +66,7 @@ message Configuration {
message StopReplicationStatus {
replicationdata.Status before = 1;
replicationdata.Status after = 2;
- bool backingUp = 3;
+ bool backup_running = 3;
}
// StopReplicationMode is used to provide controls over how replication is stopped.
diff --git a/proto/tabletmanagerdata.proto b/proto/tabletmanagerdata.proto
index 3042df23560..64e24ade4a8 100644
--- a/proto/tabletmanagerdata.proto
+++ b/proto/tabletmanagerdata.proto
@@ -348,7 +348,7 @@ message ReplicationStatusRequest {
message ReplicationStatusResponse {
replicationdata.Status status = 1;
- bool backingUp = 2;
+ bool backup_running = 2;
}
message PrimaryStatusRequest {
@@ -537,7 +537,7 @@ message StopReplicationAndGetStatusResponse {
// Status represents the replication status call right before, and right after telling the replica to stop.
replicationdata.StopReplicationStatus status = 2;
- bool backingUp = 3;
+ bool backup_running = 3;
}
message PromoteReplicaRequest {