diff --git a/msnoise/default.csv b/msnoise/default.csv index af1a7c7a..df8f1df0 100644 --- a/msnoise/default.csv +++ b/msnoise/default.csv @@ -40,6 +40,9 @@ keep_days,Y,Keep all daily cross-corr,bool,Y/N,, ref_begin,1970-01-01,Beginning or REF stacks. Can be absolute (2012-01-01) or relative (-100) days,str,,, ref_end,2100-01-01,End or REF stacks. Same as ``ref_begin``,str,,, mov_stack,"(('1d','1d'))","A list of two parameters: the time to ""roll"" over (default 1 day) and the granularity (step) of the resulting stacked CCFs (default 1 day) to stack for the Moving-window stacks. This can be a list of tuples, e.g. (('1d','1d'),('2d','1d')) corresponds to the MSNoise 1.6 ""1,2"" before. Time deltas can be anything pandas can interpret (""d"", ""min"", ""sec"", etc).",eval,,, +wienerfilt,N,"Apply wiener filter before stacking? Y[N]",bool,Y/N,, +wiener_Mlen,"24h","Smoothing along date axis (time delta)",str,,, +wiener_Nlen,"0.5s","Smoothing along lag time axis (time delta)",str,,, export_format,MSEED,Export stacks in which format(s) ?,str,SAC/MSEED/BOTH,, sac_format,doublets,Format for SAC stacks ?,str,doublets/clarke,, dtt_lag,static,How is the lag window defined,str,dynamic/static,, diff --git a/msnoise/s04_stack2.py b/msnoise/s04_stack2.py index 76071c20..2c27452e 100644 --- a/msnoise/s04_stack2.py +++ b/msnoise/s04_stack2.py @@ -101,6 +101,8 @@ import scipy.signal from .api import * +from .wiener import * + import logbook import matplotlib.pyplot as plt @@ -132,6 +134,15 @@ def main(stype, interval=1.0, loglevel="INFO"): filters = get_filters(db, all=False) + wiener_Mlen = params.wiener_Mlen + wiener_Nlen = params.wiener_Nlen + wienerfilt = params.wienerfilt + wiener_M = int(pd.to_timedelta(wiener_Mlen).total_seconds() / params.corr_duration) + wiener_N = int(pd.to_timedelta(wiener_Nlen).total_seconds() * params.cc_sampling_rate) + + if wienerfilt: + logger.info('Wiener filter enabled, will apply to CCFs before stacking') + if stype == "ref": pairs = [] @@ -163,12 
+174,12 @@ def main(stype, interval=1.0, loglevel="INFO"): logger.debug('No data found for %s:%s-%s-%i' % (sta1, sta2, components, filterid)) continue - start = np.array(start, dtype=np.datetime64) - end = np.array(end, dtype=np.datetime64) - _ = dr.where(dr.times >= start, drop=True) - _ = _.where(_.times <= end, drop=True) + + if wienerfilt: + dr = wiener_filt(dr, wiener_M, wiener_N) + # TODO add other stack methods here! using apply? - _ = _.mean(dim="times") + _ = dr.mean(dim="times") xr_save_ref(sta1, sta2, components, filterid, taxis, _) else: #stype== 'mov' @@ -228,6 +239,9 @@ def main(stype, interval=1.0, loglevel="INFO"): dr = c dr = dr.resample(times="%is" % params.corr_duration).mean() + if wienerfilt: + dr = wiener_filt(dr, wiener_M, wiener_N) + for mov_stack in mov_stacks: # if mov_stack > len(dr.times): # logger.error("not enough data for mov_stack=%i" % mov_stack) diff --git a/msnoise/wiener.py b/msnoise/wiener.py
new file mode 100644
index 00000000..4e4e2258
--- /dev/null
+++ b/msnoise/wiener.py
@@ -0,0 +1,47 @@
+from scipy.signal import wiener
+
+
+def find_segments(data):
+    """Identify continuous (non-all-NaN) segments along the first axis.
+
+    :param data: 2-D xarray.DataArray (times x lag) that may contain
+        all-NaN rows marking gaps between acquisition periods.
+    :returns: list of lists of consecutive row indices, one inner list
+        per gap-free segment.
+    """
+    continuous_segments = []
+    current_segment = []
+    for i in range(data.shape[0]):
+        # A row belongs to a segment if at least one lag sample is not NaN.
+        if not data[i, :].isnull().all():
+            current_segment.append(i)
+        else:
+            if current_segment:
+                continuous_segments.append(current_segment)
+                current_segment = []
+    if current_segment:
+        continuous_segments.append(current_segment)
+    return continuous_segments
+
+
+def wiener_filt(data, M, N):
+    """Apply a 2-D Wiener filter to the 'CCF' variable of *data*.
+
+    scipy.signal.wiener (window M x N) is run independently on each
+    continuous, gap-free block of CCFs so that NaN gaps do not leak
+    into the smoothing window.
+
+    :param data: xarray.Dataset holding a 2-D 'CCF' variable (times x lag).
+    :param M: window length along the date (times) axis, in samples.
+    :param N: window length along the lag-time axis, in samples.
+    :returns: a deep copy of *data* with the CCFs replaced by their
+        filtered version.
+    """
+    ccfs = data['CCF']
+    # A single deep copy is enough: writing into filtered_data['CCF']
+    # below updates the copied dataset's own backing array in place.
+    filtered_data = data.copy(deep=True)
+    filtered_ccfs = filtered_data['CCF']
+    for segment in find_segments(ccfs):
+        segment_data = ccfs[segment, :].values
+        filtered_ccfs[segment, :] = wiener(segment_data, (M, N))
+    return filtered_data