diff --git a/cheta/fetch.py b/cheta/fetch.py
index 1333a08..ebe2ff0 100644
--- a/cheta/fetch.py
+++ b/cheta/fetch.py
@@ -561,9 +561,7 @@ def __init__(self, msid, start=LAUNCH_DATE, stop=None, filter_bad=False, stat=No
             start, stop = intervals[0][0], intervals[-1][1]
 
         self.tstart = DateTime(start).secs
-        self.tstop = (
-            DateTime(stop).secs if stop else DateTime(time.time(), format="unix").secs
-        )
+        self.tstop = DateTime(stop).secs
         self.datestart = DateTime(self.tstart).date
         self.datestop = DateTime(self.tstop).date
         self.data_source = {}
@@ -2286,8 +2284,7 @@ def create_msid_data_gap(msid_obj: MSID, data_gap_spec: str):
     start = CxoTime(args.start)
     stop = CxoTime(args.stop)
     logger.info(
-        f"Creating data gap for {msid_obj.MSID} "
-        f"from {start.date} to {stop.date}"
+        f"Creating data gap for {msid_obj.MSID} from {start.date} to {stop.date}"
     )
     i0, i1 = np.searchsorted(msid_obj.times, [start.secs, stop.secs])
     for attr in msid_obj.colnames:
diff --git a/cheta/tests/test_fetch.py b/cheta/tests/test_fetch.py
index 0e7790b..d761b27 100644
--- a/cheta/tests/test_fetch.py
+++ b/cheta/tests/test_fetch.py
@@ -367,6 +367,13 @@ def test_interpolate_times_raise():
         dat.interpolate(10.0, times=[1, 2])
 
 
+def test_cxotime_now(monkeypatch):
+    monkeypatch.setenv("CXOTIME_NOW", "2025:002:00:00:00")
+    dat = fetch.Msid("tephin", "2025:001:12:00:00", stat="5min")
+    assert CxoTime(dat.times[-1]).date < "2025:002:00:00:00.000"
+    assert len(dat) == 132  # Matches number of 5 min intervals in 12 hours
+
+
 def test_interpolate_times():
     dat = fetch.MSIDset(
         ["aoattqt1", "aogyrct1", "aopcadmd"], "2008:002:21:48:00", "2008:002:21:50:00"
diff --git a/cheta/update_archive.py b/cheta/update_archive.py
index b790a13..1f88145 100755
--- a/cheta/update_archive.py
+++ b/cheta/update_archive.py
@@ -255,11 +255,11 @@ def main_loop():
         create_content_dir()
 
         if not os.path.exists(msid_files["colnames"].abs):
-            logger.info(f'No colnames.pickle for {ft["content"]} - skipping')
+            logger.info(f"No colnames.pickle for {ft['content']} - skipping")
             continue
 
         if not os.path.exists(fetch.msid_files["archfiles"].abs):
-            logger.info(f'No archfiles.db3 for {ft["content"]} - skipping')
+            logger.info(f"No archfiles.db3 for {ft['content']} - skipping")
             continue
 
         # Column names for stats updates (without TIME, MJF, MNF, TLM_FMT)
@@ -848,7 +848,7 @@ def truncate_archive(filetype, date):
     """Truncate msid and statfiles for every archive file after date (to nearest
    year:doy)
     """
-    logger.info(f'Truncating {filetype["content"]} full and stat files after {date}')
+    logger.info(f"Truncating {filetype['content']} full and stat files after {date}")
     colnames = pickle.load(open(msid_files["colnames"].abs, "rb"))
 
     date = DateTime(date).date
diff --git a/cheta/update_client_archive.py b/cheta/update_client_archive.py
index 2cf3dbb..1680725 100644
--- a/cheta/update_client_archive.py
+++ b/cheta/update_client_archive.py
@@ -509,7 +509,7 @@ def as_python(val):
             vals = {
                 name: as_python(archfile[name]) for name in archfile.dtype.names
             }
-            logger.debug(f'Inserting {vals["filename"]}')
+            logger.debug(f"Inserting {vals['filename']}")
             if not opt.dry_run:
                 try:
                     db.insert(vals, "archfiles")
@@ -754,7 +754,7 @@ def append_stat_col(dat, stat_file, msid, date_id, opt, logger):
     vals = {key: dat[f"{msid}.{key}"] for key in ("data", "row0", "row1")}
     logger.debug(
         f"append_stat_col msid={msid} date_id={date_id}, "
-        f'row0,1 = {vals["row0"]} {vals["row1"]}'
+        f"row0,1 = {vals['row0']} {vals['row1']}"
     )
 
     mode = "r" if opt.dry_run else "a"
@@ -765,7 +765,7 @@ def append_stat_col(dat, stat_file, msid, date_id, opt, logger):
         if vals["row1"] - 1 <= last_row_idx:
             logger.debug(
                 f"Skipping {date_id} for {msid}: no new data "
-                f'row1={vals["row1"]} last_row_idx={last_row_idx}'
+                f"row1={vals['row1']} last_row_idx={last_row_idx}"
             )
             return
 
@@ -780,14 +780,14 @@ def append_stat_col(dat, stat_file, msid, date_id, opt, logger):
         if vals["row0"] != len(h5.root.data):
             raise RowMismatchError(
                 f"ERROR: unexpected discontinuity for stat msid={msid} "
-                f'content={fetch.ft["content"]}\n'
+                f"content={fetch.ft['content']}\n"
                 "Looks like your archive is in a bad state, CONTACT "
                 "your local Ska expert with this info:\n"
-                f' First row0 in new data {vals["row0"]} != '
+                f" First row0 in new data {vals['row0']} != "
                 f"length of existing data {len(h5.root.data)}"
             )
 
-        logger.debug(f'Appending {len(vals["data"])} rows to {stat_file}')
+        logger.debug(f"Appending {len(vals['data'])} rows to {stat_file}")
         if not opt.dry_run:
             h5.root.data.append(vals["data"])
 
@@ -873,10 +873,10 @@ def append_h5_col(opt, msid, vals, logger, msid_files):
         if vals["row0"] != len(h5.root.data):
             raise RowMismatchError(
                 f"ERROR: unexpected discontinuity for full msid={msid} "
-                f'content={fetch.ft["content"]}\n'
+                f"content={fetch.ft['content']}\n"
                 "Looks like your archive is in a bad state, CONTACT "
                 "your local Ska expert with this info:\n"
-                f' First row0 in new data {vals["row0"]} != '
+                f" First row0 in new data {vals['row0']} != "
                 f"length of existing data {len(h5.root.data)}"
             )
 
diff --git a/cheta/update_server_sync.py b/cheta/update_server_sync.py
index bcf800d..272656b 100644
--- a/cheta/update_server_sync.py
+++ b/cheta/update_server_sync.py
@@ -236,7 +236,7 @@ def check_index_tbl_consistency(index_tbl):
 
     for idx, row0, row1 in zip(count(), index_tbl[:-1], index_tbl[1:]):
         if row0["row1"] != row1["row0"]:
-            msg = f'rows not contiguous at table date0={index_tbl["date_id"][idx]}'
+            msg = f"rows not contiguous at table date0={index_tbl['date_id'][idx]}"
             return msg
 
     # No problems
@@ -308,7 +308,7 @@ def update_index_file(index_file, opt, logger):
             break
 
     if not rows:
-        logger.info(f'No updates available for content {fetch.ft["content"]}')
+        logger.info(f"No updates available for content {fetch.ft['content']}")
         return index_tbl
 
     # Create table from scratch or add new rows. In normal processing there
@@ -367,8 +367,8 @@ def update_sync_data_full(content, logger, row):
     with DBI(dbi="sqlite", server=fetch.msid_files["archfiles"].abs) as dbi:
         query = (
             "select * from archfiles "
-            f'where filetime >= {row["filetime0"]} '
-            f'and filetime <= {row["filetime1"]} '
+            f"where filetime >= {row['filetime0']} "
+            f"and filetime <= {row['filetime1']} "
             "order by filetime "
         )
         archfiles = dbi.fetchall(query)