diff --git a/002b_special_instrumented_cells_details.py b/002b_special_instrumented_cells_details.py index 043c74c..6bee2ce 100644 --- a/002b_special_instrumented_cells_details.py +++ b/002b_special_instrumented_cells_details.py @@ -1,17 +1,15 @@ from __future__ import division -import cPickle as pickle +import sys import time import re import argparse +import cPickle as pickle import numpy as np import matplotlib.pyplot as plt -import sys -sys.path.append('../') - - - +if '..' not in sys.path: + sys.path.append('..') import LHCMeasurementTools.TimberManager as tm import LHCMeasurementTools.mystyle as ms @@ -253,10 +251,9 @@ arc_hist_dict = lhc_hist_dict['arcs'] # Plots -figs = [] title = 'Special instrumented cells for fill %i' % filln -fig = ms.figure(title, figs) +fig = ms.figure(title) fig.subplots_adjust(left=.1, right=.75, hspace=.38, wspace=.45) sp = None @@ -311,7 +308,7 @@ def round_to(arr, precision): bins = np.arange(round_to(arc_hist_total.min(),binwidth)-binwidth, round_to(arc_hist_total.max(),binwidth)+binwidth*3/2, binwidth) if separate_fig_for_hist: - fig = ms.figure('Special cells in histogram of heatloads', figs) + fig = ms.figure('Special cells in histogram of heatloads') sp = plt.subplot(2,2,1) else: sp = plt.subplot(2,2,4) @@ -334,7 +331,7 @@ def round_to(arr, precision): sp_ctr = ctr % 4 + 1 if sp_ctr == 1: title = 'Fill %i: Heat loads at %.1f hours' % (filln, avg_time_hrs) - fig = ms.figure(title, figs) + fig = ms.figure(title) sp = plt.subplot(2,2,sp_ctr) sp.hist(arc_hist_total, bins=bins, alpha=0.5, color='blue', weights=1./len(arc_hist_total)*np.ones_like(arc_hist_total)) sp.hist(data, bins=bins, color='green', alpha=0.5, weights=1./len(data)*np.ones_like(data)) @@ -377,7 +374,7 @@ def round_to(arr, precision): # Compare dipoles to quads -fig_dev = ms.figure('Compare devices', figs) +fig_dev = ms.figure('Compare devices') fig_dev.subplots_adjust(left=.06, right=.84, top=.93, hspace=.38, wspace=.46) # Logged data @@ -461,7 +458,7 @@ def round_to(arr, precision): for cell_ctr, cell in enumerate(cells): sp_ctr = cell_ctr % 3 + 1 if sp_ctr == 1: - fig = ms.figure('HL dict' + title, figs) + fig = ms.figure('HL dict' + title) fig.subplots_adjust(left=.06, right=.84, top=.93, hspace=.38, wspace=.42) sp = plt.subplot(3,1,sp_ctr, sharex=sp) sp.set_xlabel('Fill number') @@ -522,7 +519,7 @@ def round_to(arr, precision): re_var = re.compile('^\w{4}_\w?(\d\d[RL]\d_TT\d{3})\.POSST$') title = 'Fill %i temperature sensors' % filln - fig = ms.figure(title, figs) + fig = ms.figure(title) fig.subplots_adjust(left=.06, right=.84, top=.93, hspace=.38, wspace=.42) for cell_ctr, cell in enumerate(cells): acell = alternate_notation[cell] @@ -557,7 +554,7 @@ def round_to(arr, precision): # Separate hl for b1, b2 if args.separate: title = 'Fill %i separate beam screens' % filln - fig = ms.figure(title, figs) + fig = ms.figure(title) fig.subplots_adjust(left=.06, right=.84, top=.93, hspace=.38, wspace=.42) qbs_special = cqs.compute_qbs_special(special_atd, separate=True) @@ -585,7 +582,7 @@ def round_to(arr, precision): ms.comb_legend(sp,sp2,bbox_to_anchor=(1.3,1), fontsize=myfontsz) if args.contributions: - fig = ms.figure('Heat load contributions', figs) + fig = ms.figure('Heat load contributions') fig.subplots_adjust(right=0.75, wspace=0.45, hspace=0.4) sp = None diff --git a/004_compare_qbs_versions.py b/004_compare_qbs_versions.py index 5ff03a6..6c42e6a 100644 --- a/004_compare_qbs_versions.py +++ b/004_compare_qbs_versions.py @@ -1,6 +1,8 @@ from __future__ import division import os 
import re +import argparse + import matplotlib.pyplot as plt import h5_storage @@ -10,12 +12,16 @@ import LHCMeasurementTools.mystyle as ms +parser = argparse.ArgumentParser() +parser.add_argument('filln', type=int) +parser.add_argument('--include-current', action='store_true', help='Recompute using the current code') +args = parser.parse_args() plt.close('all') ms.mystyle() -filln = 5219 -show_current = False +filln = args.filln +show_current = args.include_current re_dir = re.compile('recalculated_qbs_(nodP_)?v(\d+)') dirs = filter(re_dir.match, os.listdir(h5_storage.recalc_dir)) @@ -38,7 +44,7 @@ sps.append(sp) for ctr, (dir_, nodp, version) in enumerate(dir_dp_version): - use_dP = nodp == None + use_dP = (nodp == None) if use_dP: label = 'V%s with dP' % version else: diff --git a/007_compare_to_older_version.py b/007_compare_to_older_version.py deleted file mode 100644 index 32bec10..0000000 --- a/007_compare_to_older_version.py +++ /dev/null @@ -1,107 +0,0 @@ -from __future__ import division -import numpy as np -import matplotlib.pyplot as plt - -import qbs_fill as qf -import LHCMeasurementTools.mystyle as ms -import LHCMeasurementTools.LHC_Heatloads as HL -import LHCMeasurementTools.TimberManager as tm -from LHCMeasurementTools.SetOfHomogeneousVariables import SetOfHomogeneousNumericVariables - -ms.mystyle_arial() - -plt.close('all') -filln = 5219 - -version = 5 -old_version = 4 - -arc_key_list = HL.variable_lists_heatloads['AVG_ARC'] -quad_key_list = [] -for key, list_ in HL.variable_lists_heatloads.iteritems(): - if key[:2] in ('Q6', 'Q5'): - quad_key_list.extend(list_) - -qbs_ob = qf.compute_qbs_fill(filln, version=version) -qbs_ob_old = qf.compute_qbs_fill(filln, version=old_version) - -# Logged data -fill_dict = {} - - -data_dir = '/afs/cern.ch/work/l/lhcscrub/LHC_2016_25ns_beforeTS1/' -fill_dict.update(tm.parse_timber_file(data_dir + 'fill_heatload_data_csvs/heatloads_fill_%d.csv' % filln, verbose=False)) -heatloads = SetOfHomogeneousNumericVariables(variable_list=arc_key_list, timber_variables=fill_dict) - -arc_data = qf.compute_qbs_arc_avg(qbs_ob) -arc_data_old = qf.compute_qbs_arc_avg(qbs_ob_old) - -def tt_hrs(arr): - return (arr - arr[0])/3600. 
-
-tt = tt_hrs(arc_data.timestamps)
-
-# Arcs
-fig = ms.figure('Comparison of arc and quad heat load for fill %i' % filln)
-fig.subplots_adjust(left=0.05, wspace=0.37)
-sp = plt.subplot(2,2,1)
-sp.grid('on')
-sp.set_ylabel('Heat load [W]')
-sp.set_xlabel('Time [h]')
-
-for ctr, (arc, data) in enumerate(arc_data.dictionary.iteritems()):
-    color = ms.colorprog(ctr, arc_data.dictionary)
-    sp.plot(tt, data, label=arc+' v%i'%version, color=color)
-
-    if ctr == 0:
-        label_old = 'v%i' % old_version
-        label_logged = 'logged'
-    else:
-        label_old, label_logged = None, None
-
-    sp.plot(tt, arc_data_old.dictionary[arc], ls ='--', color=color, label=label_old)
-
-    for key in arc_key_list:
-        if arc in key:
-            sp.plot(tt_hrs(heatloads.timber_variables[key].t_stamps), heatloads.timber_variables[key].values, color=color, ls=':', label=label_logged)
-
-sp.legend(bbox_to_anchor=(1.2,1))
-
-# Quads
-sp_5 = plt.subplot(2,2,2)
-sp_5.set_title('Q5')
-
-sp_6 = plt.subplot(2,2,4)
-sp_6.set_title('Q6')
-for sp in sp_5, sp_6:
-    sp.grid('on')
-    sp.set_ylabel('Heat load [W]')
-    sp.set_xlabel('Time [h]')
-
-fill_dict_v4 = qf.get_fill_dict(filln, version=4)
-fill_dict_v5 = qf.get_fill_dict(filln, version=5)
-
-for fd, title, ls in zip((fill_dict, fill_dict_v4, fill_dict_v5), ('logged', 'v4', 'v5'), (':', '--', '-')):
-    heatloads = SetOfHomogeneousNumericVariables(variable_list=quad_key_list, timber_variables=fd)
-    for ctr, (key, tvl) in enumerate(heatloads.timber_variables.iteritems()):
-        if key[7] == '5':
-            sp = sp_5
-        elif key[7] == '6':
-            sp = sp_6
-        else:
-            raise ValueError
-        if ls == '-' or ctr == 0:
-            label = key[6:10]
-        else:
-            label = None
-
-        color = ms.colorprog(ctr, heatloads.timber_variables)
-
-        sp.plot(tt_hrs(tvl.t_stamps), tvl.values, ls=ls, label=label, color=color)
-
-for sp in sp_5, sp_6:
-    sp.legend(bbox_to_anchor=(1.2,1))
-
-
-
-plt.show()
diff --git a/008_play_with_compute_qbs_LHC.py b/008_play_with_compute_qbs_LHC.py
index 2e499a1..6f504a3 100644
--- a/008_play_with_compute_qbs_LHC.py
+++ b/008_play_with_compute_qbs_LHC.py
@@ -1,3 +1,4 @@
+## Find out more about the computation internals: what happens for different Reynolds numbers.
 import numpy as np
 import matplotlib.pyplot as plt
 #import sys
diff --git a/009_investigate_gamma.py b/009_investigate_gamma.py
index 7011516..4dfb727 100644
--- a/009_investigate_gamma.py
+++ b/009_investigate_gamma.py
@@ -15,9 +15,9 @@ import LHCMeasurementTools.mystyle as ms
 #import LHCMeasurementTools.savefig as sf
 
-parser = argparse.ArgumentParser()
+parser = argparse.ArgumentParser(description='For a typical fill, show the values that are important to the computation and how often they occur.
Both at high energy and for the full duration.') parser.add_argument('--calc', help='Calculate instead of loading from pickle.', action='store_true') -parser.add_argument('--savefig', help='Save in file.') +parser.add_argument('--savefig', help='Save in files.') parser.add_argument('--noshow', help='Do not call plt.show', action='store_true') parser.add_argument('--onlyarcs', help='Only show arc_cells', action='store_true') parser.add_argument('--onlyinterp', help='Only show interp values', action='store_true') @@ -25,11 +25,8 @@ recompute = args.calc only_interp = args.onlyinterp -figs = [] - ms.mystyle() - plt.close('all') filln = 5219 @@ -51,7 +48,7 @@ for i in xrange(hlc.Ncell): gamma[j,i] = interp_P_T_gamma(P3[j,i], T3[j,i]) -fig = ms.figure('Interpolated Data', figs) +fig = ms.figure('Interpolated Data') interps = (interp_P_T_hPT, interp_P_T_DPT, interp_P_T_mu, interp_P_T_gamma) titles = ('Enthalpy', 'Density', 'Viscosity', 'Heat capacity ratio') @@ -179,7 +176,7 @@ affix = '(computed)' sp_ctr = ctr % 4 +1 if cc == 1 and sp_ctr == 1: - fig = ms.figure(plot_title, figs) + fig = ms.figure(plot_title) fig.subplots_adjust(right=0.75, ) fig_ctr += 1 if cc == 1: @@ -210,7 +207,8 @@ sp.legend(loc=0) if args.savefig: - for fig in figs: + for num in plt.get_fignums(): + fig = plt.figure(num) fig.savefig(os.path.expanduser(args.savefig) + '_%i.png' % fig.number) if not args.noshow: diff --git a/011_test_aligned_to_dict.py b/011_test_aligned_to_dict.py index 3a1713b..549d43e 100644 --- a/011_test_aligned_to_dict.py +++ b/011_test_aligned_to_dict.py @@ -1,3 +1,4 @@ +## Test if two methods in compute_QBS_special work. import numpy as np import qbs_fill as qf import compute_QBS_special as cqs @@ -5,7 +6,6 @@ filln = 5219 def compare_dicts_recursively(dd1, dd2): - equal = True for key, value1 in dd1.iteritems(): value2 = dd2[key] if type(value1) is np.ndarray: @@ -18,14 +18,14 @@ def compare_dicts_recursively(dd1, dd2): else: equal = value1 == value2 if not equal: - break - return equal + return False + return True qbs_dict = qf.special_qbs_fill(5219) -qbs_ob = cqs.dict_to_aligned(qbs_dict) +qbs_ob = cqs.dict_to_aligned_separate(qbs_dict) -qbs_dict_2 = cqs.aligned_to_dict(qbs_ob) +qbs_dict_2 = cqs.aligned_to_dict_separate(qbs_ob) print(compare_dicts_recursively(qbs_dict_2, qbs_dict)) @@ -33,3 +33,4 @@ def compare_dicts_recursively(dd1, dd2): qbs_dict_2['timestamps'][0] = 0 print(compare_dicts_recursively(qbs_dict_2, qbs_dict)) + diff --git a/012_test_pressure_drop.py b/012_test_pressure_drop.py index 7e73ce2..6c200e4 100644 --- a/012_test_pressure_drop.py +++ b/012_test_pressure_drop.py @@ -1,48 +1,47 @@ -from Pressure_drop import * -from config_qbs import config_qbs +from __future__ import division +import numpy as np import argparse +import matplotlib.pyplot as plt + +from Pressure_drop import calc_fl, calc_fr +from config_qbs import config_qbs -parser = argparse.ArgumentParser() +ticksize = 18 +plt.rcParams['font.size'] = ticksize +plt.rcParams['ytick.labelsize'] = ticksize +plt.rcParams['xtick.labelsize'] = ticksize + +parser = argparse.ArgumentParser('Show some internals of the pressure drop calculation.') parser.add_argument('-o', type=str, help='Path where to save the figure', default=None) parser.add_argument('--noshow', help='Do not make a plot', action='store_true') arguments = parser.parse_args() -if not arguments.noshow: - import matplotlib.pyplot as plt - plt.close('all') - - ln_Re = np.linspace(np.log(1e3), np.log(1e7), num=1000) - Re = np.exp(ln_Re) - fl = calc_fl(Re) - - try: - from 
RcParams import init_pyplot - init_pyplot() - except ImportError: - ticksize = 18 - plt.rcParams['font.size'] = ticksize - plt.rcParams['ytick.labelsize'] = ticksize - plt.rcParams['xtick.labelsize'] = ticksize - - fig = plt.figure() - fig.set_facecolor('w') - - sp = plt.subplot(1,1,1) - sp.plot(Re, fl, lw=2, label='f_L') - sp.grid(True) - sp.set_ylabel('fl', fontsize=18) - sp.set_xlabel('Re', fontsize=18) - sp.set_xscale('log') - for xx in [3e3, 1e5]: - sp.axvline(xx, color='black', lw=2, ls='--') - for xx in [4.89e3, 4.389e5]: - sp.axvline(xx, color='red', lw=1, ls='--') - fr = calc_fr(config_qbs.Radius, config_qbs.rug) - sp.axhline(fr, color='black', lw=2, label='f_R') - - sp.legend(loc=1) - - if arguments.o is None: - plt.show() - else: - fig.savefig(arguments.o, dpi=200) +plt.close('all') + +ln_Re = np.linspace(np.log(1e3), np.log(1e7), num=1000) +Re = np.exp(ln_Re) +fl = calc_fl(Re) + + +fig = plt.figure() +fig.set_facecolor('w') + +sp = plt.subplot(1,1,1) +sp.plot(Re, fl, lw=2, label='f_L') +sp.grid(True) +sp.set_ylabel('fl', fontsize=18) +sp.set_xlabel('Re', fontsize=18) +sp.set_xscale('log') +for xx in [3e3, 1e5]: + sp.axvline(xx, color='black', lw=2, ls='--') +for xx in [4.89e3, 4.389e5]: + sp.axvline(xx, color='red', lw=1, ls='--') +fr = calc_fr(config_qbs.Radius, config_qbs.rug) +sp.axhline(fr, color='black', lw=2, label='f_R') + +sp.legend(loc=1) + +if arguments.o is None: + plt.show() +else: + fig.savefig(arguments.o, dpi=200) diff --git a/013_investigate_delta_v6v7.py b/013_investigate_delta_v6v7.py deleted file mode 100644 index 9162928..0000000 --- a/013_investigate_delta_v6v7.py +++ /dev/null @@ -1,41 +0,0 @@ -import sys -if '..' not in sys.path: sys.path.append('..') -import operator - -import numpy as np - -import qbs_fill as qf -import hl_dicts.dict_utils as du - -main_dict = du.load_dict('../hl_dicts/test.pkl') - -versions = (6,7) -filln = 5219 -moment = 'stable_beams' - -qbs_obs = map(lambda v: qf.compute_qbs_fill(5219,version=v), versions) - -average_arcs = map(qf.compute_qbs_arc_avg, qbs_obs) - -dicts = [aa.dictionary for aa in average_arcs] - -diff = du.operate_on_dicts(*dicts, operator=operator.sub) - -index = np.argwhere(main_dict['filln'] == filln) -tt = main_dict[moment]['t_stamps'][index] - - -index_tt = np.argmin(np.abs(average_arcs[0].timestamps - tt)) - -for average_arc in average_arcs: - print tt - print average_arc.timestamps[index_tt] - for ctr, (arc, arr) in enumerate(sorted(average_arc.dictionary.items())): - hl_dict = main_dict[moment]['heat_load']['arc_averages'][arc][index] + main_dict['hl_subtracted_offset']['arc_averages'][arc][index] - print ('hl_dict', arc, hl_dict) - print arr[index_tt-1], arr[index_tt], arr[index_tt+1] - best_index = np.argmin(np.abs(arr - hl_dict)) - print(arr[best_index-1], arr[best_index+1], arr[best_index]) - - print - diff --git a/014_new_config_format.py b/014_new_config_format.py deleted file mode 100644 index dbb8468..0000000 --- a/014_new_config_format.py +++ /dev/null @@ -1,26 +0,0 @@ - -with open('./LHCCryoHeatLoadCalibration/CryoBeamScreenData.csv') as f: - f_new = f.readlines() - f_new = map(lambda x: x.split(','), f_new) - -with open('./config_qbs_lhc_3.csv') as f: - f_old = f.readlines() - f_old = map(lambda x: x.split(), f_old) - - -for i in xrange(len(f_new[0])): - #print f_new[0][i], f_new[1][i] - for j in xrange(len(f_old[0])): - if f_old[1][j] == f_new[1][i]: - print f_old[0][j], f_new[0][i] - - -cell_names = set() -print "duplicated" -for i, line in enumerate(f_new): - cell_name = line[3] - if cell_name 
in cell_names:
-        print cell_name
-    else:
-        cell_names.add(cell_name)
-
diff --git a/015_test_qbs_special_separate.py b/015_test_qbs_special_separate.py
index 1aecf05..7ffa141 100644
--- a/015_test_qbs_special_separate.py
+++ b/015_test_qbs_special_separate.py
@@ -22,7 +22,7 @@
 # --> Solved by excluding the last magnet from the calculation, and also adding 31L2 to the normal gasflow list as the temp sensors are reversed (826 instead of 824).
 # To be checked with B.B.
 
-parser = argparse.ArgumentParser()
+parser = argparse.ArgumentParser(description='The curves in one of the subplots should overlap.')
 parser.add_argument('filln', help='LHC fill number', type=int)
 parser.add_argument('--savefig', help='Save in file.', action='store_true')
 parser.add_argument('--noshow', help='Do not call plt.show', action='store_true')
diff --git a/016_rename_vars_in_saved_h5.py b/016_rename_vars_in_saved_h5.py
index cec39a2..a33550d 100644
--- a/016_rename_vars_in_saved_h5.py
+++ b/016_rename_vars_in_saved_h5.py
@@ -1,16 +1,19 @@
+import sys
 import os
-import h5_storage
+
 import h5py
-import config_qbs as cq
 import numpy as np
-#import LHCMeasurementTools.myfilemanager as mfm
-#import LHCMeasurementTools.TimberManager as tm
+
+import h5_storage
+import config_qbs as cq
 
 dir_special = os.path.dirname(h5_storage.get_special_qbs_file(0))
 h5_files_special = filter(lambda x: x.endswith('.h5'), os.listdir(dir_special))
 h5_files_special = map(lambda x: dir_special + '/' + x, h5_files_special)
 
+if raw_input('Continue renaming saved data? yes/no\n') not in ('yes', 'y'):
+    sys.exit()
+
 # Rename 'qbs' to 'Qbs'
 
 #for h5_file in h5_files:
diff --git a/compute_QBS_LHC.py b/compute_QBS_LHC.py
index 54e959d..ef7a459 100644
--- a/compute_QBS_LHC.py
+++ b/compute_QBS_LHC.py
@@ -7,36 +7,159 @@ from valve_LT import valve_LT
 from Pressure_drop import pd_factory, calc_re
 from config_qbs import Config_qbs, config_qbs
-from var_getter import VarGetter
 
 zeros = lambda *x: np.zeros(shape=(x), dtype=float)
 
-max_iterations = 5 # For pressure drop
 
-class HeatLoadComputer(VarGetter):
+class HeatLoadComputer(object):
+    """
+    This class computes the beam-screen heat loads from the raw Timber data, for a single cell or for all cells.
+    Parameters:
+        -atd_ob: Timber data (an AlignedTimberData object)
+        -version: Which config_qbs version to use
+        -strict: Raise an error if there are missing variables. Default: True
+        -details: Print details in the report() output.
+        -use_dP: Use the pressure drop. This takes significantly longer.
+        -compute_Re: Compute the Reynolds number (no longer used).
+        -only_raw_data: Only load the pressures, temperatures, etc., but do not compute anything.
+    """
 
-    def __init__(self, atd_ob, version=h5_storage.version, strict=True, details=False, use_dP=True, compute_Re=False):
+    max_iterations = 5 # For pressure drop
+
+    def __init__(self, atd_ob, version=h5_storage.version, strict=True, details=False, use_dP=True, compute_Re=False, only_raw_data=False):
+
+        # Initialization
         if version == h5_storage.version:
             cq = config_qbs
         else:
             cq = Config_qbs(version)
 
-        super(HeatLoadComputer,self).__init__(atd_ob, cq, strict, report=False)
+        self.atd_ob = atd_ob
+        self.cq = cq
         self.use_dP = use_dP
+        self.strict = strict
+
+        self.Ncell = len(cq.Cell_list)
+        self.Nvalue = len(atd_ob.timestamps)
+
+        self.problem_cells = {}
+        self.missing_variables = []
+        self.nan_arr = np.zeros(len(cq.Cell_list), dtype=np.bool)
+
+        # Read input variables: pressures, temperatures, electrical heaters, etc.
+ self._store_all_cell_data() + # Optionally stop here if no computation is desired + if only_raw_data: + self.report(details=details) + return + + # Main computation self.computed_values = {} self._compute_ro(use_P3=False) + if self.use_dP: self._compute_P3() self._compute_ro(use_P3=True) self._compute_H() self._compute_heat_load() + if compute_Re: self._compute_Re() + + # Some sanity checks. Then report failing cells to stdout. self.assure() self.report(details=details) + # Create object that holds data self.qbs_atd = tm.AlignedTimberData(atd_ob.timestamps, self.computed_values['qbs'], cq.Cell_list) + def _store_all_cell_data(self): + """ + Create a data_dict that contains all raw data. + """ + + cq = self.cq + + # correct: If no data available, copy it from previous cell. + # negative: This data may be negative. + self.var_data_dict = { + 'T1': { + 'vars': cq.TT961_list, + 'correct': True, + 'negative': False, + }, + 'T3': { + 'vars': cq.TT94x_list, + 'correct': False, + 'negative': False, + }, + 'CV': { + 'vars': cq.CV94x_list, + 'correct': False, + 'negative': False, + }, + 'EH': { + 'vars': cq.EH84x_list, + 'correct': False, + 'negative': True, + }, + 'P1': { + 'vars': cq.PT961_list, + 'correct': True, + 'negative': False, + }, + 'P4': { + 'vars': cq.PT991_list, + 'correct': True, + 'negative': False, + }, + 'T2': { + 'vars': cq.TT84x_list, + 'correct': False, + 'negative': False, + }, + } + data_dict = {} + for key, dd in self.var_data_dict.iteritems(): + arr = np.zeros((self.Nvalue, self.Ncell), dtype=float) + negative_allowed = dd['negative'] + can_be_corrected = dd['correct'] + + correct_first = False + for cell_ctr, var_name in enumerate(dd['vars']): + try: + data = self.atd_ob.dictionary[var_name] + except KeyError: + self.missing_variables.append(var_name) + continue + + arr[:,cell_ctr] = data + if (negative_allowed and np.all(arr[:,cell_ctr] == 0)) or (not negative_allowed and np.all(arr[:,cell_ctr] <= 0)): + if can_be_corrected: + if cell_ctr != 0: + arr[:,cell_ctr] = arr[:,cell_ctr-1] + self._insert_to_problem_cells(cell_ctr, var_name, 'corrected') + else: + correct_first = True + else: + self._insert_to_problem_cells(cell_ctr, var_name, 'no_data') + self.nan_arr[cell_ctr] = True + elif not negative_allowed and np.any(arr[:,cell_ctr] <= 0): + self._insert_to_problem_cells(cell_ctr, var_name, 'negative') + + if correct_first: + arr[:,0] = arr[:,-1] + + data_dict[key] = arr + + if self.missing_variables: + print('Warning! Some variables are missing!') + print(self.missing_variables) + if self.strict: + raise ValueError('Missing variables!') + self.data_dict = data_dict + + def _compute_H(self): """ Computes h3, hC @@ -65,6 +188,9 @@ def _compute_H(self): self.computed_values['h3'] = h3 def _compute_ro(self, use_P3): + """ + Computes helium density. + """ if use_P3: P = self.computed_values['P3'] else: @@ -81,6 +207,9 @@ def _compute_ro(self, use_P3): self.computed_values['ro'] = ro def _compute_Re(self): + """ + Computes Reynold's number. + """ m_L = self.computed_values['m_L'] D = 2*self.cq.Radius T_avg = (self.data_dict['T2'] + self.data_dict['T3'])/2. @@ -98,7 +227,7 @@ def _compute_Re(self): def _compute_P3(self): """ - Iterative computation of P3. + Iterative computation of P3 (pressure drop). 
""" P3_arr = zeros(self.Nvalue, self.Ncell) @@ -125,12 +254,12 @@ def _compute_P3(self): for j in xrange(self.Nvalue): P3_temp = 0 - counter_int = 0 + iterations = 0 P3 = P1[j,i] #iterative loop to compute the pressure drop with error of 1% or until max iteration - while abs((P3_temp-P3)/P3) > 0.01 and counter_int < max_iterations: - counter_int += 1 + while abs((P3_temp-P3)/P3) > 0.01 and iterations < self.max_iterations: + iterations += 1 #protect against P3 < P4 if P3 < P4[j,i]: break @@ -149,12 +278,15 @@ def _compute_P3(self): else: P3_arr[j,i] = P3_temp - if counter_int == max_iterations: + if iterations == self.max_iterations: self._insert_to_problem_cells(i, j, 'max_iter') self.computed_values['P3'] = P3_arr def _compute_heat_load(self): + """ + Final step: mass flow and heat load. + """ Qs_list = self.cq.Qs_list Kv_list = self.cq.Kv_list @@ -187,34 +319,73 @@ def _compute_heat_load(self): self.computed_values['qbs'] = qbs def get_single_cell_data(self, cell): + """ + Returns recomputed and raw data for one single cell. + """ output_dict = {} - for index, c in enumerate(self.cq.Cell_list): - if cell in c: - break + candidate_cells = filter(lambda x: cell in x, list(self.cq.Cell_list)) + if len(candidate_cells) == 0: + raise ValueError('Cell %s not found!' % cell) + elif len(candidate_cells) != 1: + raise ValueError('Too many cells with %s: %s' % (cell, candidate_cells)) else: - raise ValueError('Cell not found!') + index = list(self.cq.Cell_list).index(candidate_cells[0]) + for key, arr in self.computed_values.iteritems(): output_dict[key] = arr[:,index] - output_dict.update(super(HeatLoadComputer, self).get_single_cell_data(cell)) + for key, arr in self.data_dict.iteritems(): + output_dict[key] = arr[:,index] return output_dict def assure(self): """ + Check conditions: + P1 > P4 m_L > 0 """ - super(HeatLoadComputer, self).assure() + + P1 = self.data_dict['P1'] + P4 = self.data_dict['P4'] + for cell_ctr, isnan in enumerate(self.nan_arr): + if not isnan and np.any(P1[:,cell_ctr] < P4[:,cell_ctr]): + self._insert_to_problem_cells(cell_ctr, 'P1 < P4', 'failed_checks') m_L = self.computed_values['m_L'] - #qbs = self.computed_values['qbs'] for cell_ctr, isnan in enumerate(self.nan_arr): if not isnan: if np.any(m_L[:,cell_ctr] < 0): self._insert_to_problem_cells(cell_ctr, 'm_L', 'negative') - #if np.any(qbs[:,cell_ctr] < 0): - # self._insert_to_problem_cells(cell_ctr, 'qbs', 'negative') + def report(self, details=False): + """ + Print out the missing and corrected variables as well as the nan cells. + """ + + for type_, dd in self.problem_cells.iteritems(): + print('%i problems of type %s' % (len(dd), type_)) + if details: + for cell, subdict in dd.iteritems(): + print('%s in S%s of type %s: %s' % (cell, subdict['sector'], subdict['type'], subdict['list'])) + + def _insert_to_problem_cells(self, cell_ctr, var, type_): + """ + Utility to store what kind of problems and for which cells they occur. 
+ """ + problem_cells = self.problem_cells + cell = self.cq.Cell_list[cell_ctr] + if type_ not in problem_cells: + problem_cells[type_] = {} + if cell not in problem_cells[type_]: + problem_cells[type_][cell] = { + 'sector': self.cq.Sector_list[cell_ctr], + 'type': self.cq.Type_list[cell_ctr], + 'list': set(), + } + problem_cells[type_][cell]['list'].add(var) + +# Main interface of this file def compute_qbs(atd_ob, use_dP, version=h5_storage.version, strict=True, details=False): hl_comp = HeatLoadComputer(atd_ob, version=version, strict=strict, use_dP=use_dP, details=details) return hl_comp.qbs_atd diff --git a/compute_QBS_magnet.py b/compute_QBS_magnet.py index 9aa2034..e958603 100644 --- a/compute_QBS_magnet.py +++ b/compute_QBS_magnet.py @@ -19,7 +19,7 @@ def __init__(self, interp_P_T_hPT, atd, P1, m_L, cell_list): self.data_dictionary = atd.dictionary def Compute_QBS_magnet(self, cell, Tin_list, Tout_list): - print('Warning! Obsolete function Compute_QBS_magnet is called') + print('Warning! Obsolete function Compute_QBS_magnet is called. Call Compute_QBS_magnet_single instead!') cell_index = self.cell_list.index(cell) if not isinstance(Tin_list, list): Tin_list = [Tin_list] diff --git a/compute_QBS_special.py b/compute_QBS_special.py index 80bbfbc..9ad6135 100644 --- a/compute_QBS_special.py +++ b/compute_QBS_special.py @@ -236,27 +236,41 @@ def aligned_to_dict(qbs_ob): def aligned_to_dict_separate(qbs_ob): output = {} output['timestamps'] = qbs_ob.timestamps - output['cells'] = cell_list - - # create structure - for cell in cell_list: - output[cell] = {} - for magnet_id in magnet_ids: - output[cell][magnet_id] = {} + cells = set() # reverse dict to aligned seperate for var, arr in qbs_ob.dictionary.iteritems(): split_var = var.split('_') if len(split_var) == 3: cell, magnet_id, beam = split_var + + if cell not in output: + output[cell] = {} + if magnet_id not in output[cell]: + output[cell][magnet_id] = {} + output[cell][magnet_id][int(beam)] = arr elif len(split_var) == 2: cell, key = split_var + if cell not in output: + output[cell] = {} + if key in ('Qbs', 'Sum'): output[cell][key] = arr elif key in magnet_ids: + if key not in output[cell]: + output[cell][key] = {} + output[cell][key]['Sum'] = arr else: raise ValueError(key) + cells.add(cell) + + if set(cell_list) == cells: + cells = cell_list + elif set(cell_list_pre_EYETS16) == cells: + cells = cell_list_pre_EYETS16 + + output['cells'] = cells return output diff --git a/qbs_fill.py b/qbs_fill.py index f0a79e2..2033ec7 100644 --- a/qbs_fill.py +++ b/qbs_fill.py @@ -25,8 +25,6 @@ def compute_qbs_fill(filln, use_dP=True, version=default_version, recompute_if_m if filln < 3600: version = -1 print 'Warning in GasflowHLCalculator.qbs_fill: special case for pre LS1 fills. Specified version is ignored.' - else: - version = default_version h5_file = h5_storage.get_qbs_file(filln, version=version, use_dP=use_dP) if os.path.isfile(h5_file): diff --git a/var_getter.py b/var_getter.py deleted file mode 100644 index d536fd2..0000000 --- a/var_getter.py +++ /dev/null @@ -1,158 +0,0 @@ -from __future__ import division, print_function -import numpy as np - -class VarGetter(object): - """ - This class can be used to relate the raw timber data for a given cell or all cells. - Parameters: - -atd_ob: Timber data - -cq : Compute_qbs instance - -strict: Raise error if there are missing variables. Default: True - -report: Print information on failed sensors. 
- """ - def __init__(self, atd_ob, cq, strict=True, report=False): - - self.atd_ob = atd_ob - self.cq = cq - self.strict = strict - - self.Ncell = len(cq.Cell_list) - self.Nvalue = len(atd_ob.timestamps) - - self.problem_cells = {} - self.missing_variables = [] - self.nan_arr = np.zeros(len(cq.Cell_list), dtype=np.bool) - - # correct: If no data available, copy it from previous cell. - # negative: This data may be negative. - self.var_data_dict = { - 'T1': { - 'vars': cq.TT961_list, - 'correct': True, - 'negative': False, - }, - 'T3': { - 'vars': cq.TT94x_list, - 'correct': False, - 'negative': False, - }, - 'CV': { - 'vars': cq.CV94x_list, - 'correct': False, - 'negative': False, - }, - 'EH': { - 'vars': cq.EH84x_list, - 'correct': False, - 'negative': True, - }, - 'P1': { - 'vars': cq.PT961_list, - 'correct': True, - 'negative': False, - }, - 'P4': { - 'vars': cq.PT991_list, - 'correct': True, - 'negative': False, - }, - 'T2': { - 'vars': cq.TT84x_list, - 'correct': False, - 'negative': False, - }, - } - - self.data_dict = self._store_all_cell_data() - if report: - VarGetter.assure(self) - VarGetter.report(self) - - def _store_all_cell_data(self): - data_dict = {} - for key, dd in self.var_data_dict.iteritems(): - arr = np.zeros((self.Nvalue, self.Ncell), dtype=float) - negative_allowed = dd['negative'] - - correct_first = False - for cell_ctr, var_name in enumerate(dd['vars']): - try: - data = self.atd_ob.dictionary[var_name] - except KeyError: - self.missing_variables.append(var_name) - continue - - arr[:,cell_ctr] = data - if (negative_allowed and np.all(arr[:,cell_ctr] == 0)) or \ - (not negative_allowed and np.all(arr[:,cell_ctr] <= 0)): - if dd['correct']: - if cell_ctr != 0: - arr[:,cell_ctr] = arr[:,cell_ctr-1] - self._insert_to_problem_cells(cell_ctr, var_name, 'corrected') - else: - correct_first = True - else: - self._insert_to_problem_cells(cell_ctr, var_name, 'no_data') - self.nan_arr[cell_ctr] = True - elif not negative_allowed and np.any(arr[:,cell_ctr] <= 0): - self._insert_to_problem_cells(cell_ctr, var_name, 'negative') - - if correct_first: - arr[:,0] = arr[:,-1] - - data_dict[key] = arr - - if self.missing_variables: - print('Warning! Some variables are missing!') - print(self.missing_variables) - if self.strict: - raise ValueError('Missing variables!') - return data_dict - - def get_single_cell_data(self, cell): - """ - Returns a dictionary with the data for every variable type - """ - output_dict = {} - for index, c in enumerate(self.cq.Cell_list): - if cell in c: - break - else: - raise ValueError('Cell not found!') - for key, arr in self.data_dict.iteritems(): - output_dict[key] = arr[:,index] - return output_dict - - def _insert_to_problem_cells(self, cell_ctr, var, type_): - problem_cells = self.problem_cells - cell = self.cq.Cell_list[cell_ctr] - if type_ not in problem_cells: - problem_cells[type_] = {} - if cell not in problem_cells[type_]: - problem_cells[type_][cell] = { - 'sector': self.cq.Sector_list[cell_ctr], - 'type': self.cq.Type_list[cell_ctr], - 'list': set(), - } - problem_cells[type_][cell]['list'].add(var) - - def assure(self): - """ - P1 > P4 - """ - P1 = self.data_dict['P1'] - P4 = self.data_dict['P4'] - for cell_ctr, isnan in enumerate(self.nan_arr): - if not isnan and np.any(P1[:,cell_ctr] < P4[:,cell_ctr]): - self._insert_to_problem_cells(cell_ctr, 'P1 < P4', 'failed_checks') - - def report(self, details=False): - """ - Print out the missing and corrected variables as well as the nan cells. 
- """ - for type_, dd in self.problem_cells.iteritems(): - print('%i problems of type %s' % (len(dd), type_)) - if details: - for cell, subdict in dd.iteritems(): - print('%s in S%s of type %s: %s' % (cell, subdict['sector'], subdict['type'], subdict['list'])) -
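
Usage sketch for the consolidated HeatLoadComputer interface above. This is only an illustration, not part of the patch: it assumes that atd_ob is an AlignedTimberData object which already holds the raw cryo variables of one fill, and the cell name '13L5' is a placeholder.

# Illustrative usage after the VarGetter/HeatLoadComputer merge.
# Assumptions: atd_ob is an AlignedTimberData object with the raw cryo
# variables of one fill; '13L5' stands in for a real cell name.
from compute_QBS_LHC import HeatLoadComputer, compute_qbs

# Full computation, including the iterative pressure-drop correction
hl_comp = HeatLoadComputer(atd_ob, use_dP=True, strict=False, details=True)
qbs_atd = hl_comp.qbs_atd                           # one heat-load array per cell
cell_data = hl_comp.get_single_cell_data('13L5')    # raw inputs plus computed values

# Raw sensor data only, without running the heat-load computation
raw_only = HeatLoadComputer(atd_ob, only_raw_data=True)

# Module-level convenience function wrapping the same steps
qbs_atd = compute_qbs(atd_ob, use_dP=True)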