Skip to content

Commit

Permalink
Store MySQL server version in basebackup_info (basebackup.json)
Browse files — browse the repository at this point in the history
  • Loading branch information
egor-voynov-aiven committed Aug 19, 2024
1 parent 4bfc957 commit d5457d7
Show file tree
Hide file tree
Showing 2 changed files with 5 additions and 0 deletions.
4 changes: 4 additions & 0 deletions myhoard/backup_stream.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
DEFAULT_XTRABACKUP_SETTINGS,
ERR_TIMEOUT,
first_contains_gtids_not_in_second,
get_mysql_version,
GtidExecuted,
make_fs_metadata,
mysql_cursor,
Expand Down Expand Up @@ -978,10 +979,12 @@ def _take_basebackup(self) -> None:
# FLUSH BINARY LOGS might take a long time if the server is under heavy load,
# use longer than normal timeout here with multiple retries and increasing timeout.
connect_params = dict(self.mysql_client_params)
mysql_version = None
for retry, multiplier in [(True, 1), (True, 2), (False, 3)]:
try:
connect_params["timeout"] = DEFAULT_MYSQL_TIMEOUT * 5 * multiplier
with mysql_cursor(**connect_params) as cursor:
mysql_version = get_mysql_version(cursor)
cursor.execute("FLUSH BINARY LOGS")
cursor.execute("SELECT @@GLOBAL.gtid_executed AS gtid_executed")
gtid_executed = parse_gtid_range_string(cast(dict, cursor.fetchone())["gtid_executed"])
Expand Down Expand Up @@ -1031,6 +1034,7 @@ def _take_basebackup(self) -> None:
"start_size": self.basebackup_operation.data_directory_size_start,
"start_ts": start_time,
"uploaded_from": self.server_id,
"mysql_version": mysql_version,
}
self.file_storage.store_file_from_memory(
self._build_full_name("basebackup.json"),
Expand Down
1 change: 1 addition & 0 deletions test/test_backup_stream.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,7 @@ def _run_backup_stream_test(session_tmpdir, mysql_master: MySQLConfig, backup_st

assert bs.is_binlog_safe_to_delete(new_binlogs[0])
assert bs.is_log_backed_up(log_index=new_binlogs[0]["local_index"])
assert bs.state["basebackup_info"].get("mysql_version") is not None

# remote_gtid_executed will be updated once the stream notices the new binlog that was uploaded above
wait_for_condition(lambda: bs_observer.state["remote_gtid_executed"] != gtid_executed)
Expand Down

0 comments on commit d5457d7

Please sign in to comment.