From d44ee761932cbe9315c3ea10aafadb20d234406c Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Mon, 9 Dec 2024 13:19:34 -0800 Subject: [PATCH 01/30] One transformer set for each fold and for train_valid dataset --- atomsci/ddm/pipeline/model_pipeline.py | 2 +- atomsci/ddm/pipeline/model_wrapper.py | 147 +++++++++++++----------- atomsci/ddm/pipeline/perf_data.py | 72 ++++++------ atomsci/ddm/pipeline/transformations.py | 27 +++++ 4 files changed, 144 insertions(+), 104 deletions(-) diff --git a/atomsci/ddm/pipeline/model_pipeline.py b/atomsci/ddm/pipeline/model_pipeline.py index d6862709..70dd7f84 100644 --- a/atomsci/ddm/pipeline/model_pipeline.py +++ b/atomsci/ddm/pipeline/model_pipeline.py @@ -301,7 +301,7 @@ def load_featurize_data(self, params=None): # is fitted to the training data only. The transformers are then applied to the training, # validation and test sets separately. if not params.split_only: - self.model_wrapper.create_transformers(self.data) + self.model_wrapper.create_transformers(trans.get_all_training_datasets(self.data)) else: self.run_mode = '' diff --git a/atomsci/ddm/pipeline/model_wrapper.py b/atomsci/ddm/pipeline/model_wrapper.py index b788a43e..a9d7fc35 100644 --- a/atomsci/ddm/pipeline/model_wrapper.py +++ b/atomsci/ddm/pipeline/model_wrapper.py @@ -283,9 +283,9 @@ def __init__(self, params, featurizer, ds_client): self.output_dir = self.params.output_dir self.model_dir = os.path.join(self.output_dir, 'model') os.makedirs(self.model_dir, exist_ok=True) - self.transformers = [] - self.transformers_x = [] - self.transformers_w = [] + self.transformers = {} + self.transformers_x = {} + self.transformers_w = {} # **************************************************************************************** @@ -336,7 +336,7 @@ def _create_output_transformers(self, model_dataset): """ # TODO: Just a warning, we may have response transformers for classification datasets in the future if self.params.prediction_type=='regression' and self.params.transformers is True: - self.transformers = [trans.NormalizationTransformerMissingData(transform_y=True, dataset=model_dataset.dataset)] + return [trans.NormalizationTransformerMissingData(transform_y=True, dataset=model_dataset.dataset)] # **************************************************************************************** @@ -351,15 +351,15 @@ def _create_feature_transformers(self, model_dataset): transformers_x: A list of deepchem transformation objects on featurizers, only if conditions are met. """ # Set up transformers for features, if needed - self.transformers_x = trans.create_feature_transformers(self.params, model_dataset) + return trans.create_feature_transformers(self.params, model_dataset) # **************************************************************************************** - def create_transformers(self, model_dataset): + def create_transformers(self, training_datasets): """Initialize transformers for responses, features and weights, and persist them for later. 
        Args:
-            model_dataset: The ModelDataset object that handles the current dataset
+            training_datasets: A dictionary of the DeepChem Datasets used to fit the transformers, one per fold plus one for the combined training and validation data, as returned by trans.get_all_training_datasets()

        Side effects:
            Overwrites the attributes:
                transformers: A list of deepchem transformation objects on response_col, only if conditions are met

                transformers_x: A list of deepchem transformation objects on featurizers, only if conditions are met

                params.transformer_key: A string pointing to the dataset key containing the transformer in the datastore, or the path to the transformer

        """
-        self._create_output_transformers(model_dataset)
+        for k, td in training_datasets.items():
+            self.transformers[k] = self._create_output_transformers(td)

-        self._create_feature_transformers(model_dataset)
+            self.transformers_x[k] = self._create_feature_transformers(td)

-        # Set up transformers for weights, if needed
-        self.transformers_w = trans.create_weight_transformers(self.params, model_dataset)
+            # Set up transformers for weights, if needed
+            self.transformers_w[k] = trans.create_weight_transformers(self.params, td)

-        if len(self.transformers) + len(self.transformers_x) + len(self.transformers_w) > 0:
+            if len(self.transformers[k]) + len(self.transformers_x[k]) + len(self.transformers_w[k]) > 0:

-            # Transformers are no longer saved as separate datastore objects; they are included in the model tarball
-            self.params.transformer_key = os.path.join(self.output_dir, 'transformers.pkl')
-            with open(self.params.transformer_key, 'wb') as txfmrpkl:
-                pickle.dump((self.transformers, self.transformers_x, self.transformers_w), txfmrpkl)
-            self.log.info("Wrote transformers to %s" % self.params.transformer_key)
-            self.params.transformer_oid = ""
-            self.params.transformer_bucket = ""
+                # Transformers are no longer saved as separate datastore objects; they are included in the model tarball
+                self.params.transformer_key = os.path.join(self.output_dir, f'transformers_{k}.pkl')
+                with open(self.params.transformer_key, 'wb') as txfmrpkl:
+                    pickle.dump((self.transformers[k], self.transformers_x[k], self.transformers_w[k]), txfmrpkl)
+                self.log.info("Wrote transformers to %s" % self.params.transformer_key)
+                self.params.transformer_oid = ""
+                self.params.transformer_bucket = ""

# ****************************************************************************************

@@ -400,58 +401,69 @@ def reload_transformers(self):
         # Try local path first to check for transformers unpacked from model tarball
         if not trans.transformers_needed(self.params):
             return
-        local_path = f"{self.output_dir}/transformers.pkl"
-        if os.path.exists(local_path):
-            self.log.info(f"Reloading transformers from model tarball {local_path}")
-            with open(local_path, 'rb') as txfmr:
-                transformers_tuple = pickle.load(txfmr)
-        else:
-            if self.params.transformer_key is not None:
-                if self.params.save_results:
-                    self.log.info(f"Reloading transformers from datastore key {self.params.transformer_key}")
-                    transformers_tuple = dsf.retrieve_dataset_by_datasetkey(
-                        dataset_key = self.params.transformer_key,
-                        bucket = self.params.transformer_bucket,
-                        client = self.ds_client )
-                else:
-                    self.log.info(f"Reloading transformers from file {self.params.transformer_key}")
-                    with open(self.params.transformer_key, 'rb') as txfmr:
-                        transformers_tuple = pickle.load(txfmr)
+
+        for i in trans.get_transformer_keys():
+            # for backwards compatibility: if this file exists, all folds use the same transformers
+            local_path = f"{self.output_dir}/transformers.pkl"
+            if not os.path.exists(local_path):
+                local_path = f"{self.output_dir}/transformers_{i}.pkl"
+
+            if os.path.exists(local_path):
+                self.log.info(f"Reloading transformers from model tarball {local_path}")
+                with 
open(local_path, 'rb') as txfmr: + transformers_tuple = pickle.load(txfmr) else: - # Shouldn't happen - raise Exception("Transformers needed to reload model, but no transformer_key specified.") + if self.params.transformer_key is not None: + if self.params.save_results: + self.log.info(f"Reloading transformers from datastore key {self.params.transformer_key}") + transformers_tuple = dsf.retrieve_dataset_by_datasetkey( + dataset_key = self.params.transformer_key, + bucket = self.params.transformer_bucket, + client = self.ds_client ) + else: + self.log.info(f"Reloading transformers from file {self.params.transformer_key}") + with open(self.params.transformer_key, 'rb') as txfmr: + transformers_tuple = pickle.load(txfmr) + else: + # Shouldn't happen + raise Exception("Transformers needed to reload model, but no transformer_key specified.") - if len(transformers_tuple) == 3: - self.transformers, self.transformers_x, self.transformers_w = transformers_tuple - else: - self.transformers, self.transformers_x = transformers_tuple - self.transformers_w = [] + if len(transformers_tuple) == 3: + ty, tx, tw = transformers_tuple + else: + ty, tx = transformers_tuple + tw = [] + + self.transformers[i] = ty + self.transformers_x[i] = tx + self.transformers_w[i] = tw # **************************************************************************************** - def transform_dataset(self, dataset): + def transform_dataset(self, dataset, fold=0): """Transform the responses and/or features in the given DeepChem dataset using the current transformers. Args: dataset: The DeepChem DiskDataset that contains a dataset + fold (int): Which fold is being transformed. Returns: transformed_dataset: The transformed DeepChem DiskDataset """ transformed_dataset = dataset - if len(self.transformers) > 0: + if len(self.transformers[fold]) > 0: self.log.info("Transforming response data") - for transformer in self.transformers: + for transformer in self.transformers[fold]: transformed_dataset = transformer.transform(transformed_dataset) - if len(self.transformers_x) > 0: + if len(self.transformers_x[fold]) > 0: self.log.info("Transforming feature data") - for transformer in self.transformers_x: + for transformer in self.transformers_x[fold]: transformed_dataset = transformer.transform(transformed_dataset) - if len(self.transformers_w) > 0: + if len(self.transformers_w[fold]) > 0: self.log.info("Transforming weights") - for transformer in self.transformers_w: + for transformer in self.transformers_w[fold]: transformed_dataset = transformer.transform(transformed_dataset) return transformed_dataset @@ -486,7 +498,7 @@ def get_train_valid_pred_results(self, perf_data): return perf_data.get_prediction_results() # **************************************************************************************** - def get_test_perf_data(self, model_dir, model_dataset): + def get_test_perf_data(self, model_dir, model_dataset, fold): """Returns the predicted values and metrics for the current test dataset against the version of the model stored in model_dir, as a PerfData object. @@ -506,14 +518,15 @@ def get_test_perf_data(self, model_dir, model_dataset): # We pass transformed=False to indicate that the preds and uncertainties we get from # generate_predictions are already untransformed, so that perf_data.get_prediction_results() # doesn't untransform them again. 
-        if hasattr(self.transformers[0], "ishybrid"):
+        if hasattr(self.transformers[0][0], "ishybrid"):
             # indicate that we are training a hybrid model
+            # TODO: decide what to pass as the y transformer now that transformers are fold-dependent.
             perf_data = perf.create_perf_data("hybrid", model_dataset, self.transformers, 'test', is_ki=self.params.is_ki, ki_convert_ratio=self.params.ki_convert_ratio, transformed=False)
         else:
             perf_data = perf.create_perf_data(self.params.prediction_type, model_dataset, self.transformers, 'test', transformed=False)
         test_dset = model_dataset.test_dset
         test_preds, test_stds = self.generate_predictions(test_dset)
-        _ = perf_data.accumulate_preds(test_preds, test_dset.ids, test_stds)
+        _ = perf_data.accumulate_preds(test_preds, test_dset.ids, fold, pred_stds=test_stds)
         return perf_data

# ****************************************************************************************
@@ -532,7 +545,7 @@ def get_test_pred_results(self, model_dir, model_dataset):
         return perf_data.get_prediction_results()

# ****************************************************************************************
-    def get_full_dataset_perf_data(self, model_dataset):
+    def get_full_dataset_perf_data(self, model_dataset, fold):
         """Returns the predicted values and metrics from the current model for the full current dataset,
         as a PerfData object.

@@ -555,7 +568,7 @@
         else:
             perf_data = perf.create_perf_data(self.params.prediction_type, model_dataset, self.transformers, 'full', transformed=False)
         full_preds, full_stds = self.generate_predictions(model_dataset.dataset)
-        _ = perf_data.accumulate_preds(full_preds, model_dataset.dataset.ids, full_stds)
+        _ = perf_data.accumulate_preds(full_preds, model_dataset.dataset.ids, fold, pred_stds=full_stds)
         return perf_data

# ****************************************************************************************
@@ -913,10 +926,10 @@ def train_kfold_cv(self, pipeline):
             train_pred = self.model.predict(train_dset, [])
             test_pred = self.model.predict(test_dset, [])

-            train_perf = train_perf_data.accumulate_preds(train_pred, train_dset.ids)
-            test_perf = test_perf_data.accumulate_preds(test_pred, test_dset.ids)
+            train_perf = train_perf_data.accumulate_preds(train_pred, train_dset.ids, fold=k)
+            test_perf = test_perf_data.accumulate_preds(test_pred, test_dset.ids, fold=k)

-            valid_perf = em.accumulate(ei, subset='valid', dset=valid_dset)
+            valid_perf = em.accumulate(ei, subset='valid', dset=valid_dset, fold=k)
             self.log.info("Fold %d, epoch %d: training %s = %.3f, validation %s = %.3f, test %s = %.3f" % (
                           k, ei, pipeline.metric_type, train_perf, pipeline.metric_type, valid_perf,
                           pipeline.metric_type, test_perf))
@@ -939,7 +952,7 @@ def train_kfold_cv(self, pipeline):
         for ei in range(self.best_epoch+1):
             self.model.fit(fit_dataset, nb_epoch=1, checkpoint_interval=0, restore=False)
-            train_perf, test_perf = em.update_epoch(ei, train_dset=fit_dataset, test_dset=test_dset)
+            train_perf, test_perf = em.update_epoch(ei, train_dset=fit_dataset, test_dset=test_dset, fold='train_val')
             self.log.info(f"Combined folds: Epoch {ei}, training {pipeline.metric_type} = {train_perf:.3}," +
                           f"test {pipeline.metric_type} = {test_perf:.3}")

@@ -999,7 +1012,7 @@ def train_with_early_stopping(self, pipeline):
             # saved will be the one we created intentionally when we reached a new best validation score.
self.model.fit(train_dset, nb_epoch=1, checkpoint_interval=0) train_perf, valid_perf, test_perf = em.update_epoch(ei, - train_dset=train_dset, valid_dset=valid_dset, test_dset=test_dset) + train_dset=train_dset, valid_dset=valid_dset, test_dset=test_dset, fold=0) self.log.info("Epoch %d: training %s = %.3f, validation %s = %.3f, test %s = %.3f" % ( ei, pipeline.metric_type, train_perf, pipeline.metric_type, valid_perf, @@ -1455,7 +1468,7 @@ def train(self, pipeline): valid_loss_ep /= (valid_data.n_ki + valid_data.n_bind) train_perf, valid_perf, test_perf = em.update_epoch(ei, - train_dset=train_dset, valid_dset=valid_dset, test_dset=test_dset) + train_dset=train_dset, valid_dset=valid_dset, test_dset=test_dset, fold=0) self.log.info("Epoch %d: training %s = %.3f, training loss = %.3f, validation %s = %.3f, validation loss = %.3f, test %s = %.3f" % ( ei, pipeline.metric_type, train_perf, train_loss_ep, pipeline.metric_type, valid_perf, valid_loss_ep, @@ -1650,13 +1663,13 @@ def train(self, pipeline): self.model.fit(train_dset) train_pred = self.model.predict(train_dset, []) - train_perf = self.train_perf_data.accumulate_preds(train_pred, train_dset.ids) + train_perf = self.train_perf_data.accumulate_preds(train_pred, train_dset.ids, fold=k) valid_pred = self.model.predict(valid_dset, []) - valid_perf = self.valid_perf_data.accumulate_preds(valid_pred, valid_dset.ids) + valid_perf = self.valid_perf_data.accumulate_preds(valid_pred, valid_dset.ids, fold=k) test_pred = self.model.predict(test_dset, []) - test_perf = self.test_perf_data.accumulate_preds(test_pred, test_dset.ids) + test_perf = self.test_perf_data.accumulate_preds(test_pred, test_dset.ids, fold=k) self.log.info("Fold %d: training %s = %.3f, validation %s = %.3f, test %s = %.3f" % ( k, pipeline.metric_type, train_perf, pipeline.metric_type, valid_perf, pipeline.metric_type, test_perf)) @@ -2069,13 +2082,13 @@ def train(self, pipeline): self.model.fit(train_dset) train_pred = self.model.predict(train_dset, []) - train_perf = self.train_perf_data.accumulate_preds(train_pred, train_dset.ids) + train_perf = self.train_perf_data.accumulate_preds(train_pred, train_dset.ids, fold=k) valid_pred = self.model.predict(valid_dset, []) - valid_perf = self.valid_perf_data.accumulate_preds(valid_pred, valid_dset.ids) + valid_perf = self.valid_perf_data.accumulate_preds(valid_pred, valid_dset.ids, fold=k) test_pred = self.model.predict(test_dset, []) - test_perf = self.test_perf_data.accumulate_preds(test_pred, test_dset.ids) + test_perf = self.test_perf_data.accumulate_preds(test_pred, test_dset.ids, fold=k) self.log.info("Fold %d: training %s = %.3f, validation %s = %.3f, test %s = %.3f" % ( k, pipeline.metric_type, train_perf, pipeline.metric_type, valid_perf, pipeline.metric_type, test_perf)) diff --git a/atomsci/ddm/pipeline/perf_data.py b/atomsci/ddm/pipeline/perf_data.py index 12b7bcb8..5863ca22 100644 --- a/atomsci/ddm/pipeline/perf_data.py +++ b/atomsci/ddm/pipeline/perf_data.py @@ -132,7 +132,7 @@ def __init__(self, model_dataset, subset): """Initialize any attributes that are common to all PerfData subclasses""" # **************************************************************************************** - def accumulate_preds(self, predicted_vals, ids, pred_stds=None): + def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): """Raises: NotImplementedError: The method is implemented by subclasses """ @@ -217,7 +217,7 @@ def __init__(self, model_dataset, subset): self.weights = None # 
**************************************************************************************** - def accumulate_preds(self, predicted_vals, ids, pred_stds=None): + def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): """Raises: NotImplementedError: The method is implemented by subclasses """ @@ -406,7 +406,7 @@ def __init__(self, model_dataset, subset): self.weights = None # **************************************************************************************** - def accumulate_preds(self, predicted_vals, ids, pred_stds=None): + def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): """Raises: NotImplementedError: The method is implemented by subclasses """ @@ -631,7 +631,7 @@ def __init__(self, model_dataset, subset): self.weights = None # **************************************************************************************** - def accumulate_preds(self, predicted_vals, ids, pred_stds=None): + def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): """Raises: NotImplementedError: The method is implemented by subclasses """ @@ -945,7 +945,7 @@ def __init__(self, model_dataset, transformers, subset, transformed=True): # **************************************************************************************** # class KFoldRegressionPerfData - def accumulate_preds(self, predicted_vals, ids, pred_stds=None): + def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): """Add training, validation or test set predictions from the current fold to the data structure where we keep track of them. @@ -989,7 +989,7 @@ def accumulate_preds(self, predicted_vals, ids, pred_stds=None): self.pred_vals[id] = np.concatenate([self.pred_vals[id], predicted_vals[i,:].reshape((1,-1))], axis=0) self.folds += 1 - pred_vals = dc.trans.undo_transforms(predicted_vals, self.transformers) + pred_vals = dc.trans.undo_transforms(predicted_vals, self.transformers[fold]) real_vals = self.get_real_values(ids) weights = self.get_weights(ids) @@ -1023,15 +1023,15 @@ def get_pred_values(self): ids = sorted(self.pred_vals.keys()) if self.subset in ['train', 'test', 'train_valid']: rawvals = np.concatenate([self.pred_vals[id].mean(axis=0, keepdims=True).reshape((1,-1)) for id in ids]) - vals = dc.trans.undo_transforms(rawvals, self.transformers) + vals = dc.trans.undo_transforms(rawvals, self.transformers[fold]) if self.folds > 1: stds = dc.trans.undo_transforms(np.concatenate([self.pred_vals[id].std(axis=0, keepdims=True).reshape((1,-1)) - for id in ids]), self.transformers) + for id in ids]), self.transformers[fold]) else: stds = None else: rawvals = np.concatenate([self.pred_vals[id].reshape((1,-1)) for id in ids], axis=0) - vals = dc.trans.undo_transforms(rawvals, self.transformers) + vals = dc.trans.undo_transforms(rawvals, self.transformers[fold]) stds = None return (ids, vals, stds) @@ -1053,7 +1053,7 @@ def get_real_values(self, ids=None): if ids is None: ids = sorted(self.pred_vals.keys()) real_vals = np.concatenate([self.real_vals[id].reshape((1,-1)) for id in ids], axis=0) - return dc.trans.undo_transforms(real_vals, self.transformers) + return dc.trans.undo_transforms(real_vals, self.transformers[fold]) # **************************************************************************************** @@ -1210,7 +1210,7 @@ def __init__(self, model_dataset, transformers, subset, predict_probs=True, tran # **************************************************************************************** # class KFoldClassificationPerfData - def accumulate_preds(self, predicted_vals, 
ids, pred_stds=None): + def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): """Add training, validation or test set predictions from the current fold to the data structure where we keep track of them. @@ -1245,7 +1245,7 @@ def accumulate_preds(self, predicted_vals, ids, pred_stds=None): task_real_vals = np.squeeze(real_vals[nzrows,i,:]) task_class_probs = dc.trans.undo_transforms( np.squeeze(class_probs[nzrows,i,:]), - self.transformers) + self.transformers[fold]) scores.append(roc_auc_score(task_real_vals, task_class_probs, average='macro')) else: # For binary classifier, sklearn metrics functions are expecting single array of 1s and 0s for real_vals_list, @@ -1253,7 +1253,7 @@ def accumulate_preds(self, predicted_vals, ids, pred_stds=None): task_real_vals = np.squeeze(real_vals[nzrows,i]) task_class_probs = dc.trans.undo_transforms( np.squeeze(class_probs[nzrows,i,1]), - self.transformers) + self.transformers[fold]) scores.append(roc_auc_score(task_real_vals, task_class_probs)) self.perf_metrics.append(np.array(scores)) return float(np.mean(scores)) @@ -1284,11 +1284,11 @@ def get_pred_values(self): #prob_stds = np.concatenate([dc.trans.undo_transforms(self.pred_vals[id], self.transformers).std(axis=0, keepdims=True) # for id in ids], axis=0) class_probs = dc.trans.undo_transforms(np.concatenate([self.pred_vals[id].mean(axis=0, keepdims=True) - for id in ids], axis=0), self.transformers) + for id in ids], axis=0), self.transformers[fold]) prob_stds = dc.trans.undo_transforms(np.concatenate([self.pred_vals[id].std(axis=0, keepdims=True) - for id in ids], axis=0), self.transformers) + for id in ids], axis=0), self.transformers[fold]) else: - class_probs = np.concatenate([dc.trans.undo_transforms(self.pred_vals[id], self.transformers) for id in ids], axis=0) + class_probs = np.concatenate([dc.trans.undo_transforms(self.pred_vals[id], self.transformers[fold]) for id in ids], axis=0) prob_stds = None pred_classes = np.argmax(class_probs, axis=2) return (ids, pred_classes, class_probs, prob_stds) @@ -1450,7 +1450,7 @@ def __init__(self, model_dataset, transformers, subset, transformed=True): # **************************************************************************************** # class SimpleRegressionPerfData - def accumulate_preds(self, predicted_vals, ids, pred_stds=None): + def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): """Add training, validation or test set predictions to the data structure where we keep track of them. 
@@ -1469,7 +1469,7 @@ def accumulate_preds(self, predicted_vals, ids, pred_stds=None): self.pred_vals = self._reshape_preds(predicted_vals) if pred_stds is not None: self.pred_stds = self._reshape_preds(pred_stds) - pred_vals = dc.trans.undo_transforms(self.pred_vals, self.transformers) + pred_vals = dc.trans.undo_transforms(self.pred_vals, self.transformers[fold]) real_vals = self.get_real_values(ids) weights = self.get_weights(ids) scores = [] @@ -1497,15 +1497,15 @@ def get_pred_values(self): stds (np.array): Contains (ncmpds, ntasks) array of prediction standard deviations """ - vals = dc.trans.undo_transforms(self.pred_vals, self.transformers) + vals = dc.trans.undo_transforms(self.pred_vals, self.transformers[fold]) stds = None if self.pred_stds is not None: stds = self.pred_stds - if len(self.transformers) == 1 and (isinstance(self.transformers[0], dc.trans.NormalizationTransformer) or isinstance(self.transformers[0],trans.NormalizationTransformerMissingData)): + if len(self.transformers[fold]) == 1 and (isinstance(self.transformers[fold][0], dc.trans.NormalizationTransformer) or isinstance(self.transformers[fold][0],trans.NormalizationTransformerMissingData)): # Untransform the standard deviations, if we can. This is a bit of a hack, but it works for # NormalizationTransformer, since the standard deviations used to scale the data are # stored in the transformer object. - y_stds = self.transformers[0].y_stds.reshape((1,-1,1)) + y_stds = self.transformers[fold][0].y_stds.reshape((1,-1,1)) stds = stds / y_stds return (self.ids, vals, stds) @@ -1523,7 +1523,7 @@ def get_real_values(self, ids=None): np.array: Containing the real dataset response values with transformations undone. """ - return dc.trans.undo_transforms(self.real_vals, self.transformers) + return dc.trans.undo_transforms(self.real_vals, self.transformers[fold]) # **************************************************************************************** @@ -1687,7 +1687,7 @@ def __init__(self, model_dataset, transformers, subset, predict_probs=True, tran # **************************************************************************************** # class SimpleClassificationPerfData - def accumulate_preds(self, predicted_vals, ids, pred_stds=None): + def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): """Add training, validation or test set predictions from the current dataset to the data structure where we keep track of them. @@ -1716,7 +1716,7 @@ def accumulate_preds(self, predicted_vals, ids, pred_stds=None): task_real_vals = np.squeeze(real_vals[nzrows,i,:]) task_class_probs = dc.trans.undo_transforms( np.squeeze(class_probs[nzrows,i,:]), - self.transformers) + self.transformers[fold]) scores.append(roc_auc_score(task_real_vals, task_class_probs, average='macro')) else: # For binary classifier, sklearn metrics functions are expecting single array of 1s and 0s for real_vals_list, @@ -1724,7 +1724,7 @@ def accumulate_preds(self, predicted_vals, ids, pred_stds=None): task_real_vals = np.squeeze(real_vals[nzrows,i]) task_class_probs = dc.trans.undo_transforms( np.squeeze(class_probs[nzrows,i,1]), - self.transformers) + self.transformers[fold]) scores.append(roc_auc_score(task_real_vals, task_class_probs)) self.perf_metrics.append(np.array(scores)) return float(np.mean(scores)) @@ -1752,7 +1752,7 @@ class probability estimates. 
            prob_stds (np.array): Contains (ncmpds, ntasks, nclasses) array of standard errors for the class probability estimates
        """
-        class_probs = dc.trans.undo_transforms(self.pred_vals, self.transformers)
+        class_probs = dc.trans.undo_transforms(self.pred_vals, self.transformers[fold])
         pred_classes = np.argmax(class_probs, axis=2)
         prob_stds = self.pred_stds
         return (self.ids, pred_classes, class_probs, prob_stds)
@@ -1907,7 +1907,7 @@ def __init__(self, model_dataset, transformers, subset, is_ki, ki_convert_ratio=

# ****************************************************************************************
# class SimpleHybridPerfData
-    def accumulate_preds(self, predicted_vals, ids, pred_stds=None):
+    def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None):
         """Add training, validation or test set predictions to the data structure
         where we keep track of them.

@@ -2013,7 +2013,7 @@ def get_real_values(self, ids=None):
             np.array: Containing the real dataset response values with transformations undone.

         """
-        return self.transformers[0].untransform(self.real_vals)
+        return self.transformers[fold][0].untransform(self.real_vals)

# ****************************************************************************************

@@ -2160,7 +2160,7 @@ def should_stop(self):

# ****************************************************************************************
# class EpochManager
-    def update_epoch(self, ei, train_dset=None, valid_dset=None, test_dset=None):
+    def update_epoch(self, ei, fold, train_dset=None, valid_dset=None, test_dset=None):
         """Update training state after an epoch

         This function updates train/valid/test_perf_data. Call this function once
@@ -2186,15 +2186,15 @@ def update_epoch(self, ei, train_dset=None, valid_dset=None, test_dset=None):

         This function updates self._should_stop
         """
-        train_perf = self.update(ei, 'train', train_dset)
-        valid_perf = self.update(ei, 'valid', valid_dset)
-        test_perf = self.update(ei, 'test', test_dset)
+        train_perf = self.update(ei, 'train', fold, train_dset)
+        valid_perf = self.update(ei, 'valid', fold, valid_dset)
+        test_perf = self.update(ei, 'test', fold, test_dset)

         return [p for p in [train_perf, valid_perf, test_perf] if p is not None]

# ****************************************************************************************
# class EpochManager
-    def accumulate(self, ei, subset, dset):
+    def accumulate(self, ei, subset, dset, fold):
         """Accumulate predictions

         Makes predictions, accumulates them and calculates the performance metric. Calls PerfData.accumulate_preds

@@ -2211,7 +2211,7 @@ def accumulate(self, ei, subset, dset):
             float: Performance metric for the given dset.
         """
         pred = self._make_pred(dset)
-        perf = getattr(self.wrapper, f'{subset}_perf_data')[ei].accumulate_preds(pred, dset.ids)
+        perf = getattr(self.wrapper, f'{subset}_perf_data')[ei].accumulate_preds(pred, dset.ids, fold)
         return perf

# ****************************************************************************************

@@ -2270,7 +2270,7 @@ def update_valid(self, ei):

# ****************************************************************************************
# class EpochManager
-    def update(self, ei, subset, dset=None):
+    def update(self, ei, subset, fold, dset=None):
         """Update training state

         Updates the training state for a given subset and epoch index with the given dataset.
@@ -2289,7 +2289,7 @@ def update(self, ei, subset, dset=None): if dset is None: return None - perf = self.accumulate(ei, subset, dset) + perf = self.accumulate(ei, subset, dset, fold) self.compute(ei, subset) if subset == 'valid': diff --git a/atomsci/ddm/pipeline/transformations.py b/atomsci/ddm/pipeline/transformations.py index 5b8ca7a8..e94ec261 100644 --- a/atomsci/ddm/pipeline/transformations.py +++ b/atomsci/ddm/pipeline/transformations.py @@ -138,7 +138,34 @@ def get_transformer_specific_metadata(params): return meta_dict # **************************************************************************************** +def get_transformer_keys(params): + """Makes all transformer keys + There is one set of transformers for each fold and then one transformer + for both validation and training sets. AMPL automatically trains a model + using all validation and training data at the end of the training loop. + """ + if params.split_strategy == 'k_fold_cv': + return [0, 'train_val'] + else: + return list(range(params.num_folds))+['train_val'] + +# **************************************************************************************** +def get_all_training_datasets(model_dataset): + """Returns all 'training' datasets + This takes a model_dataset and returns a dictionary of all + datasets that will need a transformer. The keys will match + what is returned by get_transformer_keys + """ + result = {} + # First, get the training set from all the folds + for i, t in enumerate(model_dataset.train_valid_dsets): + result[i] = t + + # Next, add the dataset that contains all training+validation data + result['train_val'] = model_dataset.combined_training_data() + +# **************************************************************************************** class UMAPTransformer(Transformer): """Dimension reduction transformations using the UMAP algorithm. 
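
Note on the scheme introduced in PATCH 01: each cross-validation fold gets its own transformer set, fit only on that fold's training split, plus one extra set (keyed 'train_val' above) fit on the combined training and validation data for the final retrained model. The sketch below illustrates the idea with plain DeepChem objects and an invented three-fold split; it is not AMPL code, and all names in it are made up for illustration. The point is that a fold's validation data is scaled using statistics computed only from that fold's training split, so no validation information leaks into the transformer fit.

import numpy as np
import deepchem as dc

rng = np.random.default_rng(0)
X, y = rng.normal(size=(90, 16)), rng.normal(size=(90, 1))
folds = np.array_split(np.arange(90), 3)

# One set of response transformers per fold, fit only on that fold's
# training split, plus one fit on all training+validation data.
transformers = {}
for k in range(3):
    train_idx = np.concatenate([f for j, f in enumerate(folds) if j != k])
    train_dset = dc.data.NumpyDataset(X[train_idx], y[train_idx])
    transformers[k] = [dc.trans.NormalizationTransformer(transform_y=True, dataset=train_dset)]
transformers['train_val'] = [dc.trans.NormalizationTransformer(
    transform_y=True, dataset=dc.data.NumpyDataset(X, y))]

# Fold 0's validation set is transformed with fold 0's statistics only
valid_dset = dc.data.NumpyDataset(X[folds[0]], y[folds[0]])
for t in transformers[0]:
    valid_dset = t.transform(valid_dset)

# Predictions made in transformed space are mapped back with the same fold's transformers
preds = dc.trans.undo_transforms(valid_dset.y, transformers[0])
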
From 10675c60c4b09be0fba33e40ae7b5803bdb21718 Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Mon, 9 Dec 2024 13:39:08 -0800 Subject: [PATCH 02/30] Returned all_training_datasets --- atomsci/ddm/pipeline/transformations.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/atomsci/ddm/pipeline/transformations.py b/atomsci/ddm/pipeline/transformations.py index e94ec261..83e958c7 100644 --- a/atomsci/ddm/pipeline/transformations.py +++ b/atomsci/ddm/pipeline/transformations.py @@ -164,6 +164,8 @@ def get_all_training_datasets(model_dataset): # Next, add the dataset that contains all training+validation data result['train_val'] = model_dataset.combined_training_data() + return result + # **************************************************************************************** class UMAPTransformer(Transformer): From 71d08595713ca51f701374d193964982b1c00610 Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Mon, 9 Dec 2024 13:56:46 -0800 Subject: [PATCH 03/30] creating transformers now expects a DeepChem dataset instead of a model_dataset --- atomsci/ddm/pipeline/model_wrapper.py | 12 ++++++------ atomsci/ddm/pipeline/transformations.py | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/atomsci/ddm/pipeline/model_wrapper.py b/atomsci/ddm/pipeline/model_wrapper.py index a9d7fc35..43a7bc43 100644 --- a/atomsci/ddm/pipeline/model_wrapper.py +++ b/atomsci/ddm/pipeline/model_wrapper.py @@ -324,7 +324,7 @@ def get_model_specific_metadata(self): raise NotImplementedError # **************************************************************************************** - def _create_output_transformers(self, model_dataset): + def _create_output_transformers(self, dataset): """Initialize transformers for responses and persist them for later. Args: @@ -336,11 +336,11 @@ def _create_output_transformers(self, model_dataset): """ # TODO: Just a warning, we may have response transformers for classification datasets in the future if self.params.prediction_type=='regression' and self.params.transformers is True: - return [trans.NormalizationTransformerMissingData(transform_y=True, dataset=model_dataset.dataset)] + return [trans.NormalizationTransformerMissingData(transform_y=True, dataset=dataset)] # **************************************************************************************** - def _create_feature_transformers(self, model_dataset): + def _create_feature_transformers(self, dataset): """Initialize transformers for features, and persist them for later. Args: @@ -351,7 +351,7 @@ def _create_feature_transformers(self, model_dataset): transformers_x: A list of deepchem transformation objects on featurizers, only if conditions are met. """ # Set up transformers for features, if needed - return trans.create_feature_transformers(self.params, model_dataset) + return trans.create_feature_transformers(self.params, dataset) # **************************************************************************************** @@ -1589,7 +1589,7 @@ def get_model_specific_metadata(self): return model_spec_metadata # **************************************************************************************** - def _create_output_transformers(self, model_dataset): + def _create_output_transformers(self, dataset): """Initialize transformers for responses and persist them for later. 
Args: @@ -1601,7 +1601,7 @@ def _create_output_transformers(self, model_dataset): """ # TODO: Just a warning, we may have response transformers for classification datasets in the future if self.params.prediction_type=='regression' and self.params.transformers is True: - self.transformers = [trans.NormalizationTransformerHybrid(transform_y=True, dataset=model_dataset.dataset)] + self.transformers = [trans.NormalizationTransformerHybrid(transform_y=True, dataset=dataset)] # **************************************************************************************** class ForestModelWrapper(ModelWrapper): diff --git a/atomsci/ddm/pipeline/transformations.py b/atomsci/ddm/pipeline/transformations.py index 83e958c7..877c1ea5 100644 --- a/atomsci/ddm/pipeline/transformations.py +++ b/atomsci/ddm/pipeline/transformations.py @@ -92,7 +92,7 @@ def create_feature_transformers(params, model_dataset): return transformers_x # **************************************************************************************** -def create_weight_transformers(params, model_dataset): +def create_weight_transformers(params, dataset): """Fit an optional balancing transformation to the weight matrix of the given dataset, and return a DeepChem transformer object holding its parameters. @@ -106,7 +106,7 @@ def create_weight_transformers(params, model_dataset): """ if params.weight_transform_type == 'balancing': if params.prediction_type == 'classification': - transformers_w = [BalancingTransformer(model_dataset.dataset)] + transformers_w = [BalancingTransformer(dataset)] else: log.warning("Warning: Balancing transformer only supported for classification models.") transformers_w = [] From a2520a469be88101b105c68dc720df8ce6af4ef4 Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Tue, 10 Dec 2024 08:25:12 -0800 Subject: [PATCH 04/30] Added fold as a parameter to more functions, 'final' is the name of the default fold, transformers only see one Dataset during creation, updated unit tests --- atomsci/ddm/pipeline/model_wrapper.py | 48 +++++++++++-------- atomsci/ddm/pipeline/perf_data.py | 36 +++++++------- atomsci/ddm/pipeline/perf_plots.py | 8 ++-- atomsci/ddm/pipeline/transformations.py | 46 ++++++++++++------ .../test/integrative/hybrid/test_hybrid.py | 2 +- atomsci/ddm/test/unit/test_model_wrapper.py | 42 ++++++++-------- .../ddm/utils/hyperparam_search_wrapper.py | 2 +- 7 files changed, 104 insertions(+), 80 deletions(-) diff --git a/atomsci/ddm/pipeline/model_wrapper.py b/atomsci/ddm/pipeline/model_wrapper.py index 43a7bc43..2ff7ccee 100644 --- a/atomsci/ddm/pipeline/model_wrapper.py +++ b/atomsci/ddm/pipeline/model_wrapper.py @@ -283,9 +283,9 @@ def __init__(self, params, featurizer, ds_client): self.output_dir = self.params.output_dir self.model_dir = os.path.join(self.output_dir, 'model') os.makedirs(self.model_dir, exist_ok=True) - self.transformers = {} - self.transformers_x = {} - self.transformers_w = {} + self.transformers = trans.get_blank_transformations() + self.transformers_x = trans.get_blank_transformations() + self.transformers_w = trans.get_blank_transformations() # **************************************************************************************** @@ -351,7 +351,7 @@ def _create_feature_transformers(self, dataset): transformers_x: A list of deepchem transformation objects on featurizers, only if conditions are met. 
""" # Set up transformers for features, if needed - return trans.create_feature_transformers(self.params, dataset) + return trans.create_feature_transformers(self.params, self.featurization, dataset) # **************************************************************************************** @@ -402,7 +402,7 @@ def reload_transformers(self): if not trans.transformers_needed(self.params): return - for i in trans.get_transformer_keys(): + for i in trans.get_transformer_keys(self.params): # for backwards compatibity if this file exists, all folds use the same transformers local_path = f"{self.output_dir}/transformers.pkl" if not os.path.exists(local_path): @@ -441,7 +441,7 @@ def reload_transformers(self): # **************************************************************************************** - def transform_dataset(self, dataset, fold=0): + def transform_dataset(self, dataset, fold='final'): """Transform the responses and/or features in the given DeepChem dataset using the current transformers. Args: @@ -518,7 +518,7 @@ def get_test_perf_data(self, model_dir, model_dataset, fold): # We pass transformed=False to indicate that the preds and uncertainties we get from # generate_predictions are already untransformed, so that perf_data.get_prediction_results() # doesn't untransform them again. - if hasattr(self.transformers[0][0], "ishybrid"): + if hasattr(self.transformers['final'][0], "ishybrid"): # indicate that we are training a hybrid model # ASDF need to know what to pass in as the y transform now that they are fold dependent. perf_data = perf.create_perf_data("hybrid", model_dataset, self.transformers, 'test', is_ki=self.params.is_ki, ki_convert_ratio=self.params.ki_convert_ratio, transformed=False) @@ -562,7 +562,7 @@ def get_full_dataset_perf_data(self, model_dataset, fold): # We pass transformed=False to indicate that the preds and uncertainties we get from # generate_predictions are already untransformed, so that perf_data.get_prediction_results() # doesn't untransform them again. - if hasattr(self.transformers[0], "ishybrid"): + if hasattr(self.transformers['final'][0], "ishybrid"): # indicate that we are training a hybrid model perf_data = perf.create_perf_data("hybrid", model_dataset, self.transformers, 'full', is_ki=self.params.is_ki, ki_convert_ratio=self.params.ki_convert_ratio, transformed=False) else: @@ -1012,7 +1012,7 @@ def train_with_early_stopping(self, pipeline): # saved will be the one we created intentionally when we reached a new best validation score. self.model.fit(train_dset, nb_epoch=1, checkpoint_interval=0) train_perf, valid_perf, test_perf = em.update_epoch(ei, - train_dset=train_dset, valid_dset=valid_dset, test_dset=test_dset, fold=0) + train_dset=train_dset, valid_dset=valid_dset, test_dset=test_dset, fold='final') self.log.info("Epoch %d: training %s = %.3f, validation %s = %.3f, test %s = %.3f" % ( ei, pipeline.metric_type, train_perf, pipeline.metric_type, valid_perf, @@ -1123,14 +1123,18 @@ def generate_predictions(self, dataset): # =-=ksm: The second 'isinstance' shouldn't be necessary since NormalizationTransformerMissingData # is a subclass of dc.trans.NormalizationTransformer. 
- if len(self.transformers) == 1 and (isinstance(self.transformers[0], dc.trans.NormalizationTransformer) - or isinstance(self.transformers[0],trans.NormalizationTransformerMissingData)): - y_stds = self.transformers[0].y_stds.reshape((1,ntasks,1)) + if len(self.transformers) == 1 and (isinstance(self.transformers['final'][0], dc.trans.NormalizationTransformer) + or isinstance(self.transformers['final'][0],trans.NormalizationTransformerMissingData)): + y_stds = self.transformers['final'][0].y_stds.reshape((1,ntasks,1)) std = std / y_stds - pred = dc.trans.undo_transforms(pred, self.transformers) + pred = dc.trans.undo_transforms(pred, self.transformers['final']) else: # Classification models and regression models without uncertainty are handled here - txform = [] if (not self.params.transformers or self.transformers is None) else self.transformers + if (not self.params.transformers or self.transformers is None): + txform = [] + else: + txform = self.transformers['final'] + pred = self.model.predict(dataset, txform) if self.params.prediction_type == 'regression': if isinstance(pred, list) and len(pred) == 0: @@ -1277,8 +1281,9 @@ def _l2_loss(self, yp, yr): if len(pos_bind[0]) == 0: return loss_ki, torch.tensor(0.0, dtype=torch.float32) # convert Ki to % binding - y_stds = self.transformers[0].y_stds - y_means = self.transformers[0].y_means + # assumes no folds + y_stds = self.transformers['final'][0].y_stds + y_means = self.transformers['final'][0].y_means if self.params.is_ki: bind_pred = self._predict_binding(y_means + y_stds * yp[pos_bind, 0], conc=yr[pos_bind, 1]) else: @@ -1305,8 +1310,9 @@ def _poisson_hybrid_loss(self, yp, yr): # Compute L2 loss for pKi predictions loss_ki = torch.sum((yp[pos_ki, 0] - yr[pos_ki, 0]) ** 2) #convert the ki prediction back to Ki scale - y_stds = self.transformers[0].y_stds - y_means = self.transformers[0].y_means + #hybrid models do not support kfold validation + y_stds = self.transformers['final'][0].y_stds + y_means = self.transformers['final'][0].y_means # Compute fraction bound to *radioligand* (not drug) from predicted pKi if self.params.is_ki: rl_bind_pred = 1 - self._predict_binding(y_means + y_stds * yp[pos_bind, 0], conc=yr[pos_bind, 1]) @@ -1468,7 +1474,7 @@ def train(self, pipeline): valid_loss_ep /= (valid_data.n_ki + valid_data.n_bind) train_perf, valid_perf, test_perf = em.update_epoch(ei, - train_dset=train_dset, valid_dset=valid_dset, test_dset=test_dset, fold=0) + train_dset=train_dset, valid_dset=valid_dset, test_dset=test_dset, fold='final') self.log.info("Epoch %d: training %s = %.3f, training loss = %.3f, validation %s = %.3f, validation loss = %.3f, test %s = %.3f" % ( ei, pipeline.metric_type, train_perf, train_loss_ep, pipeline.metric_type, valid_perf, valid_loss_ep, @@ -1558,11 +1564,11 @@ def generate_predictions(self, dataset): if self.params.transformers and self.transformers is not None: if has_conc: pred = np.concatenate((pred, real[:, [1]]), axis=1) - pred = self.transformers[0].untransform(pred, isreal=False) + pred = self.transformers['final'][0].untransform(pred, isreal=False) pred_bind_pos = np.where(~np.isnan(pred[:, 1]))[0] pred[pred_bind_pos, 0] = self._predict_binding(pred[pred_bind_pos, 0], pred[pred_bind_pos, 1]) else: - pred = self.transformers[0].untransform(pred, isreal=False) + pred = self.transformers['final'][0].untransform(pred, isreal=False) else: if has_conc: pred = np.concatenate((pred, real[:, [1]]), axis=1) diff --git a/atomsci/ddm/pipeline/perf_data.py b/atomsci/ddm/pipeline/perf_data.py index 
5863ca22..48271d32 100644
--- a/atomsci/ddm/pipeline/perf_data.py
+++ b/atomsci/ddm/pipeline/perf_data.py
@@ -139,14 +139,14 @@ def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None):
         raise NotImplementedError

# ****************************************************************************************
-    def get_pred_values(self):
+    def get_pred_values(self, fold):
         """Raises:
         NotImplementedError: The method is implemented by subclasses
         """
         raise NotImplementedError

# ****************************************************************************************
-    def get_real_values(self, ids=None):
+    def get_real_values(self, fold, ids=None):
         """Raises:
         NotImplementedError: The method is implemented by subclasses
         """
@@ -170,7 +170,7 @@ def compute_perf_metrics(self, per_task=False):
         raise NotImplementedError

# ****************************************************************************************
-    def get_prediction_results(self):
+    def get_prediction_results(self, fold):
         """Raises:
         NotImplementedError: The method is implemented by subclasses
         """
@@ -224,7 +224,7 @@ def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None):
         raise NotImplementedError

# ****************************************************************************************
-    def get_pred_values(self):
+    def get_pred_values(self, fold):
         """Raises:
         NotImplementedError: The method is implemented by subclasses
         """
@@ -257,7 +257,7 @@ def model_choice_score(self, score_type='r2'):

         """
-        ids, pred_vals, stds = self.get_pred_values()
-        real_vals = self.get_real_values(ids)
+        ids, pred_vals, stds = self.get_pred_values('final')
+        real_vals = self.get_real_values('final', ids)
         weights = self.get_weights(ids)
         scores = []
         for i in range(self.num_tasks):
@@ -274,7 +274,7 @@

# ****************************************************************************************
# class RegressionPerfData
-    def get_prediction_results(self):
+    def get_prediction_results(self, fold):
         """Returns a dictionary of performance metrics for a regression model.
         The dictionary values should contain only primitive Python types, so that it can
         be easily JSONified.

@@ -303,8 +303,8 @@
         # and then averaging the metrics. If people start asking for SDs of MAE and RMSE scores over folds,
         # we'll change the code to compute all metrics the same way.

-        (ids, pred_vals, pred_stds) = self.get_pred_values()
-        real_vals = self.get_real_values(ids)
+        (ids, pred_vals, pred_stds) = self.get_pred_values(fold=fold)
+        real_vals = self.get_real_values(fold, ids)
         weights = self.get_weights(ids)
         mae_scores = []
         rms_scores = []
@@ -477,7 +477,7 @@

# ****************************************************************************************
# class HybridPerfData
-    def get_prediction_results(self):
+    def get_prediction_results(self, fold):
         """Returns a dictionary of performance metrics for a regression model.
         The dictionary values should contain only primitive Python types, so that it can
         be easily JSONified.

@@ -707,7 +707,7 @@ def model_choice_score(self, score_type='roc_auc'):

# ****************************************************************************************
# class ClassificationPerfData
-    def get_prediction_results(self):
+    def get_prediction_results(self, fold):
         """Returns a dictionary of performance metrics for a classification model.
         The dictionary values will contain only primitive Python types, so that it can
         be easily JSONified.
@@ -1005,7 +1005,7 @@ def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): # **************************************************************************************** # class KFoldRegressionPerfData - def get_pred_values(self): + def get_pred_values(self, fold): """Returns the predicted values accumulated over training, with any transformations undone. If self.subset is 'train' or 'test', the function will return averages over the training folds for each compound along with standard deviations when there are predictions from multiple folds. Otherwise, returns a @@ -1038,7 +1038,7 @@ def get_pred_values(self): # **************************************************************************************** # class KFoldRegressionPerfData - def get_real_values(self, ids=None): + def get_real_values(self, fold, ids=None): """Returns the real dataset response values, with any transformations undone, as an (ncmpds, ntasks) array in the same ID order as get_pred_values() (unless ids is specified). @@ -1260,7 +1260,7 @@ def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): # **************************************************************************************** # class KFoldClassificationPerfData - def get_pred_values(self): + def get_pred_values(self, fold): """Returns the predicted values accumulated over training, with any transformations undone. If self.subset is 'train', 'train_valid' or 'test', the function will return the means and standard deviations of the class probabilities over the training folds for each compound, for each task. Otherwise, returns a single set of predicted probabilites for @@ -1484,7 +1484,7 @@ def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): # **************************************************************************************** # class SimpleRegressionPerfData - def get_pred_values(self): + def get_pred_values(self, fold): """Returns the predicted values accumulated over training, with any transformations undone. Returns a tuple (ids, values, stds), where ids is the list of compound IDs, values is a (ncmpds, ntasks) array of predictions, and stds is always None for this class. @@ -1512,7 +1512,7 @@ def get_pred_values(self): # **************************************************************************************** # class SimpleRegressionPerfData - def get_real_values(self, ids=None): + def get_real_values(self, fold, ids=None): """Returns the real dataset response values, with any transformations undone, as an (ncmpds, ntasks) array with compounds in the same ID order as in the return from get_pred_values(). @@ -1731,7 +1731,7 @@ def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): # **************************************************************************************** # class SimpleClassificationPerfData - def get_pred_values(self): + def get_pred_values(self, fold): """Returns the predicted values accumulated over training, with any transformations undone. If self.subset is 'train', the function will average class probabilities over the k-1 folds in which each compound was part of the training set, and return the most probable class. 
Otherwise, there should be a @@ -1979,7 +1979,7 @@ def _predict_binding(self, activity, conc): # **************************************************************************************** # class SimpleHybridPerfData - def get_pred_values(self): + def get_pred_values(self, fold): """Returns the predicted values accumulated over training, with any transformations undone. Returns a tuple (ids, values, stds), where ids is the list of compound IDs, values is a (ncmpds, ntasks) array of predictions, and stds is always None for this class. @@ -2002,7 +2002,7 @@ def get_pred_values(self): # **************************************************************************************** # class SimpleHybridPerfData - def get_real_values(self, ids=None): + def get_real_values(self, fold, ids=None): """Returns the real dataset response values, with any transformations undone, as an (ncmpds, ntasks) array with compounds in the same ID order as in the return from get_pred_values(). diff --git a/atomsci/ddm/pipeline/perf_plots.py b/atomsci/ddm/pipeline/perf_plots.py index 6fcdcb04..2f705c12 100644 --- a/atomsci/ddm/pipeline/perf_plots.py +++ b/atomsci/ddm/pipeline/perf_plots.py @@ -138,7 +138,7 @@ def plot_pred_vs_actual(model, epoch_label='best', threshold=None, error_bars=Fa subs_pred_df=pred_df[pred_df.subset==subset] perf_data = wrapper.get_perf_data(subset, epoch_label) - pred_results = perf_data.get_prediction_results() + pred_results = perf_data.get_prediction_results('final') # pred_df=pfm.predict_from_pipe(model) std=len([x for x in pred_df.columns if 'std' in x]) > 0 if perf_data.num_tasks > 1: @@ -164,9 +164,9 @@ def plot_pred_vs_actual(model, epoch_label='best', threshold=None, error_bars=Fa # % binding / inhibition data, with one row per subset. for s, subset in enumerate(subsets): perf_data = wrapper.get_perf_data(subset, epoch_label) - pred_results = perf_data.get_prediction_results() - y_actual = perf_data.get_real_values() - ids, y_pred, y_std = perf_data.get_pred_values() + pred_results = perf_data.get_prediction_results('final') + y_actual = perf_data.get_real_values('final') + ids, y_pred, y_std = perf_data.get_pred_values('final') r2 = pred_results['r2_score'] if perf_data.num_tasks > 1: r2_scores = pred_results['task_r2_scores'] diff --git a/atomsci/ddm/pipeline/transformations.py b/atomsci/ddm/pipeline/transformations.py index 877c1ea5..a9143f1c 100644 --- a/atomsci/ddm/pipeline/transformations.py +++ b/atomsci/ddm/pipeline/transformations.py @@ -38,10 +38,14 @@ def get_statistics_missing_ydata(dataset): values for the y variable only. The x variable still assumes no missing values. 
""" - y_means = np.zeros(len(dataset.get_task_names())) - y_m2 = np.zeros(len(dataset.get_task_names())) - dy = np.zeros(len(dataset.get_task_names())) - n = np.zeros(len(dataset.get_task_names())) + if len(dataset.y.shape)==1: + num_tasks = 1 + else: + num_tasks = dataset.y.shape[1] + y_means = np.zeros(num_tasks) + y_m2 = np.zeros(num_tasks) + dy = np.zeros(num_tasks) + n = np.zeros(num_tasks) for _, y, w, _ in dataset.itersamples(): for it in range(len(y)) : ## set weights to 0 for missing data @@ -61,7 +65,7 @@ def get_statistics_missing_ydata(dataset): return y_means, y_stds # **************************************************************************************** -def create_feature_transformers(params, model_dataset): +def create_feature_transformers(params, featurization, train_dset): """Fit a scaling and centering transformation to the feature matrix of the given dataset, and return a DeepChem transformer object holding its parameters. @@ -78,14 +82,13 @@ def create_feature_transformers(params, model_dataset): log.warning("UMAP feature transformation is deprecated and will be removed in a future release.") if params.split_strategy == 'k_fold_cv': log.warning("Warning: UMAP transformation may produce misleading results when used with K-fold split strategy.") - train_dset = model_dataset.train_valid_dsets[0][0] transformers_x = [UMAPTransformer(params, train_dset)] elif params.transformers: # TODO: Transformers on responses and features should be controlled only by parameters # response_transform_type and feature_transform_type, rather than params.transformers. # Scale and center feature matrix if featurization type calls for it - transformers_x = model_dataset.featurization.create_feature_transformer(model_dataset.dataset) + transformers_x = featurization.create_feature_transformer(train_dset) else: transformers_x = [] @@ -144,10 +147,17 @@ def get_transformer_keys(params): for both validation and training sets. AMPL automatically trains a model using all validation and training data at the end of the training loop. 
""" - if params.split_strategy == 'k_fold_cv': - return [0, 'train_val'] + if params.split_strategy != 'k_fold_cv': + return ['final'] else: - return list(range(params.num_folds))+['train_val'] + return list(range(params.num_folds))+['final'] + +# **************************************************************************************** +def get_blank_transformations(): + """Get empty transformations dictionary + These keys must always exist, even when there are no transformations + """ + return {'final':[]} # **************************************************************************************** def get_all_training_datasets(model_dataset): @@ -157,12 +167,18 @@ def get_all_training_datasets(model_dataset): what is returned by get_transformer_keys """ result = {} - # First, get the training set from all the folds - for i, t in enumerate(model_dataset.train_valid_dsets): - result[i] = t + if model_dataset.splitting is None: + # this dataset is not split into training and validation, use all data + result['final'] = model_dataset.dataset + elif len(model_dataset.train_valid_dsets)==1: + result['final'] = model_dataset.train_valid_dsets[0] + else: + # First, get the training set from all the folds + for i, t in enumerate(model_dataset.train_valid_dsets): + result[i] = t - # Next, add the dataset that contains all training+validation data - result['train_val'] = model_dataset.combined_training_data() + # Next, add the dataset that contains all training+validation data + result['final'] = model_dataset.combined_training_data() return result diff --git a/atomsci/ddm/test/integrative/hybrid/test_hybrid.py b/atomsci/ddm/test/integrative/hybrid/test_hybrid.py index cc504103..d05255a3 100644 --- a/atomsci/ddm/test/integrative/hybrid/test_hybrid.py +++ b/atomsci/ddm/test/integrative/hybrid/test_hybrid.py @@ -52,7 +52,7 @@ def test(): print("Check the model performance on validation data") pred_data = pl.model_wrapper.get_perf_data(subset="valid", epoch_label="best") - pred_results = pred_data.get_prediction_results() + pred_results = pred_data.get_prediction_results('final') print(pred_results) pred_score = pred_results['r2_score'] diff --git a/atomsci/ddm/test/unit/test_model_wrapper.py b/atomsci/ddm/test/unit/test_model_wrapper.py index 6c512fea..0b270987 100644 --- a/atomsci/ddm/test/unit/test_model_wrapper.py +++ b/atomsci/ddm/test/unit/test_model_wrapper.py @@ -7,6 +7,7 @@ import atomsci.ddm.pipeline.model_datasets as model_dataset import atomsci.ddm.pipeline.model_wrapper as model_wrapper import atomsci.ddm.pipeline.model_pipeline as MP +import atomsci.ddm.pipeline.transformations as trans import utils_testing as utils import copy @@ -43,22 +44,22 @@ #*********************************************************************************** def test_create_model_wrapper(): """ - Args: + Args: params (Namespace) : Parameters passed to the model pipeline - featurizer (Featurization): Object managing the featurization of compounds - ds_client (DatastoreClient): Interface to the file datastore + featurizer (Featurization): Object managing the featurization of compounds + ds_client (DatastoreClient): Interface to the file datastore - Returns: - model (pipeline.Model): Wrapper for DeepChem, sklearn or other model. + Returns: + model (pipeline.Model): Wrapper for DeepChem, sklearn or other model. - Raises: -ValueError: Only params.model_type = 'NN' or 'RF' is supported. + Raises: + ValueError: Only params.model_type = 'NN' or 'RF' is supported. 
-Dependencies: -None + Dependencies: + None -Calls: -MultitaskDCModelWrapper, DCRFModelWrapper + Calls: + MultitaskDCModelWrapper, DCRFModelWrapper """ inp_params = parse.wrapper(general_params) featurization = feat.create_featurization(inp_params) @@ -72,8 +73,8 @@ def test_create_model_wrapper(): test.append(mdl.output_dir == inp_params.output_dir) test.append(mdl.model_dir == inp_params.output_dir + '/' + 'model') test.append(mdl.best_model_dir == inp_params.output_dir + '/' + 'best_model') - test.append(mdl.transformers == []) - test.append(mdl.transformers_x == []) + test.append(mdl.transformers['final'] == []) + test.append(mdl.transformers_x['final'] == []) test.append(isinstance(mdl, model_wrapper.MultitaskDCModelWrapper)) # testing for correct attribute initialization with model_type == "RF" @@ -137,20 +138,21 @@ def test_super_create_transformers(): test = [] test.append(mdl.params.prediction_type == 'regression') test.append(mdl.params.model_type == 'NN') - mdl.create_transformers(data_obj_ecfp) - test.append(isinstance(mdl.transformers[0], dc.trans.transformers.NormalizationTransformer)) - test.append(mdl.transformers_x == []) + mdl.create_transformers(trans.get_all_training_datasets(data_obj_ecfp)) + test.append(isinstance(mdl.transformers['final'][0], dc.trans.transformers.NormalizationTransformer)) + test.append(mdl.transformers_x['final'] == []) #testing saving of transformer to correct location: - transformer_path = os.path.join(mdl.output_dir, 'transformers.pkl') + transformer_path = os.path.join(mdl.output_dir, 'transformers_final.pkl') test.append(os.path.isfile(transformer_path)) + # TODO: test proper saving of the transformer to the datastore # TODO: test when transformers is False: inp_params.prediction_type = 'classification' mdl = model_wrapper.create_model_wrapper(inp_params, featurization) - test.append(mdl.transformers == []) - test.append(mdl.transformers_x == []) + test.append(mdl.transformers['final'] == []) + test.append(mdl.transformers_x['final'] == []) assert all(test) @@ -182,7 +184,7 @@ def test_super_transform_dataset(): data_obj_ecfp.get_featurized_data() mdl = model_wrapper.create_model_wrapper(inp_params, data_obj_ecfp.featurization) mdl.setup_model_dirs() - mdl.create_transformers(data_obj_ecfp) + mdl.create_transformers(trans.get_all_training_datasets(data_obj_ecfp)) dataset = mdl.transform_dataset(data_obj_ecfp.dataset) test = [] diff --git a/atomsci/ddm/utils/hyperparam_search_wrapper.py b/atomsci/ddm/utils/hyperparam_search_wrapper.py index 264ec6d9..9c5e6861 100755 --- a/atomsci/ddm/utils/hyperparam_search_wrapper.py +++ b/atomsci/ddm/utils/hyperparam_search_wrapper.py @@ -1564,7 +1564,7 @@ def lossfn(p): for subset in subsets: if not model_failed: perf_data = pl.model_wrapper.get_perf_data(subset=subset, epoch_label="best") - sub_pred_results = perf_data.get_prediction_results() + sub_pred_results = perf_data.get_prediction_results('final') else: if tparam.prediction_type == "regression": sub_pred_results = {"r2_score": 0, "rms_score": 100} From 18987d36ee27cd26b2952b8f4d2f0cc6972c9185 Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Tue, 10 Dec 2024 14:58:38 -0800 Subject: [PATCH 05/30] No more transformers in perf_data, No more transformers in EpochManager, transforms happen right before it's used. 
Transformations are un-done before passing into perf_data --- atomsci/ddm/pipeline/model_datasets.py | 27 +- atomsci/ddm/pipeline/model_pipeline.py | 13 +- atomsci/ddm/pipeline/model_wrapper.py | 130 +++++---- atomsci/ddm/pipeline/perf_data.py | 288 +++++++------------- atomsci/ddm/pipeline/transformations.py | 12 +- atomsci/ddm/test/unit/test_model_wrapper.py | 2 +- 6 files changed, 204 insertions(+), 268 deletions(-) diff --git a/atomsci/ddm/pipeline/model_datasets.py b/atomsci/ddm/pipeline/model_datasets.py index 87bbffd9..36f6be3b 100644 --- a/atomsci/ddm/pipeline/model_datasets.py +++ b/atomsci/ddm/pipeline/model_datasets.py @@ -379,6 +379,7 @@ def get_featurized_data(self, params=None): if params.prediction_type=='classification': w = w.astype(np.float32) + self.untransformed_dataset = NumpyDataset(features, self.vals, ids=ids) self.dataset = NumpyDataset(features, self.vals, ids=ids, w=w) self.log.info("Using prefeaturized data; number of features = " + str(self.n_features)) return @@ -404,6 +405,7 @@ def get_featurized_data(self, params=None): self.log.debug("Number of features: " + str(self.n_features)) # Create the DeepChem dataset + self.untransformed_dataset = NumpyDataset(features, self.vals, ids=ids) self.dataset = NumpyDataset(features, self.vals, ids=ids, w=w) # Checking for minimum number of rows if len(self.dataset) < params.min_compound_number: @@ -681,7 +683,7 @@ def has_all_feature_columns(self, dset_df): # ************************************************************************************* - def get_subset_responses_and_weights(self, subset, transformers): + def get_subset_responses_and_weights(self, subset): """Returns a dictionary mapping compound IDs in the given dataset subset to arrays of response values and weights. Used by the perf_data module under k-fold CV. 
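The hunks above attach an untransformed copy of the dataset to the model dataset; the hunk below uses it to look up original response values by compound ID. A minimal runnable sketch of that lookup, with synthetic data and a free function standing in for the new ModelDataset method:

    # Sketch only: synthetic data; in AMPL this logic lives on ModelDataset.
    import numpy as np
    from deepchem.data import NumpyDataset

    features = np.zeros((3, 8))
    vals = np.array([[1.0], [2.0], [3.0]])
    ids = np.array(['c1', 'c2', 'c3'])
    untransformed_dataset = NumpyDataset(features, vals, ids=ids)

    def get_untransformed_responses(ids_wanted):
        # Return original (pre-transform) responses in the requested ID order.
        response_dict = dict(zip(untransformed_dataset.ids, untransformed_dataset.y))
        return np.array([response_dict[i] for i in ids_wanted])

    print(get_untransformed_responses(['c3', 'c1']))  # [[3.] [1.]]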
@@ -703,9 +705,13 @@ def get_subset_responses_and_weights(self, subset, transformers): else: raise ValueError('Unknown dataset subset type "%s"' % subset) - y = dc.trans.undo_transforms(dataset.y, transformers) + response_vals = dict() + dataset_ids = set(dataset.ids) + for id, y in zip(self.untransformed_dataset.ids, self.untransformed_dataset.y): + if id in dataset_ids: + response_vals[id] = y + w = dataset.w - response_vals = dict([(id, y[i,:]) for i, id in enumerate(dataset.ids)]) weights = dict([(id, w[i,:]) for i, id in enumerate(dataset.ids)]) self.subset_response_dict[subset] = response_vals self.subset_weight_dict[subset] = weights @@ -713,6 +719,19 @@ def get_subset_responses_and_weights(self, subset, transformers): # ************************************************************************************* + def get_untransformed_responses(self, ids): + """ Returns a numpy array of untransformed response values + """ + response_vals = np.zeros((len(ids), self.untransformed_dataset.y.shape[1])) + response_dict = dict([(id, y) for id, y in zip(self.untransformed_dataset.ids, self.untransformed_dataset.y)]) + + for i, id in enumerate(ids): + response_vals[i] = response_dict[id] + + return response_vals + + # ************************************************************************************* + def _get_split_key(self): """Creates the proper CSV name for a split file @@ -828,6 +847,8 @@ def get_featurized_data(self, dset_df, is_featurized=False): params, self.contains_responses) self.log.warning("Done") self.n_features = self.featurization.get_feature_count() + + self.untransformed_dataset= NumpyDataset(features, self.vals, ids=ids) self.dataset = NumpyDataset(features, self.vals, ids=ids) # **************************************************************************************** diff --git a/atomsci/ddm/pipeline/model_pipeline.py b/atomsci/ddm/pipeline/model_pipeline.py index 70dd7f84..8cb3adc9 100644 --- a/atomsci/ddm/pipeline/model_pipeline.py +++ b/atomsci/ddm/pipeline/model_pipeline.py @@ -305,13 +305,6 @@ def load_featurize_data(self, params=None): else: self.run_mode = '' - if self.run_mode == 'training': - for i, (train, valid) in enumerate(self.data.train_valid_dsets): - train = self.model_wrapper.transform_dataset(train) - valid = self.model_wrapper.transform_dataset(valid) - self.data.train_valid_dsets[i] = (train, valid) - self.data.test_dset = self.model_wrapper.transform_dataset(self.data.test_dset) - # **************************************************************************************** def create_model_metadata(self): @@ -864,7 +857,7 @@ def predict_full_dataset(self, dset_df, is_featurized=False, contains_responses= # Get features for each compound and construct a DeepChem Dataset from them self.data.get_featurized_data(dset_df, is_featurized) # Transform the features and responses if needed - self.data.dataset = self.model_wrapper.transform_dataset(self.data.dataset) + self.data.dataset = self.model_wrapper.transform_dataset(self.data.dataset, fold='final') # Note that at this point, the dataset may contain fewer rows than the input. Typically this happens because # of invalid SMILES strings. 
Remove any rows from the input dataframe corresponding to SMILES strings that were @@ -995,7 +988,7 @@ def predict_embedding(self, dset_df, dset_params=None): self.data = model_datasets.create_minimal_dataset(self.params, self.featurization) self.data.get_featurized_data(dset_df, is_featurized=False) # Not sure the following is necessary - self.data.dataset = self.model_wrapper.transform_dataset(self.data.dataset) + self.data.dataset = self.model_wrapper.transform_dataset(self.data.dataset, fold='final') # Get the embeddings as a numpy array embeddings = self.model_wrapper.generate_embeddings(self.data.dataset) @@ -1577,7 +1570,7 @@ def ensemble_predict(model_uuids, collections, dset_df, labels=None, dset_params raise Exception("response_cols missing from model params") is_featurized = (len(set(pipe.featurization.get_feature_columns()) - set(dset_df.columns.values)) == 0) pipe.data.get_featurized_data(dset_df, is_featurized) - pipe.data.dataset = pipe.model_wrapper.transform_dataset(pipe.data.dataset) + pipe.data.dataset = pipe.model_wrapper.transform_dataset(pipe.data.dataset, fold='final') # Create a temporary data frame to hold the compound IDs and predictions. The model may not # return predictions for all the requested compounds, so we have to outer join the predictions diff --git a/atomsci/ddm/pipeline/model_wrapper.py b/atomsci/ddm/pipeline/model_wrapper.py index 2ff7ccee..521db40c 100644 --- a/atomsci/ddm/pipeline/model_wrapper.py +++ b/atomsci/ddm/pipeline/model_wrapper.py @@ -8,6 +8,7 @@ import joblib import deepchem as dc +import deepchem.trans as dctrans import numpy as np import tensorflow as tf if dc.__version__.startswith('2.1'): @@ -441,7 +442,7 @@ def reload_transformers(self): # **************************************************************************************** - def transform_dataset(self, dataset, fold='final'): + def transform_dataset(self, dataset, fold): """Transform the responses and/or features in the given DeepChem dataset using the current transformers. Args: @@ -514,19 +515,14 @@ def get_test_perf_data(self, model_dir, model_dataset, fold): # Create a PerfData object, which knows how to format the prediction results in the structure # expected by the model tracker. - - # We pass transformed=False to indicate that the preds and uncertainties we get from - # generate_predictions are already untransformed, so that perf_data.get_prediction_results() - # doesn't untransform them again. if hasattr(self.transformers['final'][0], "ishybrid"): # indicate that we are training a hybrid model - # ASDF need to know what to pass in as the y transform now that they are fold dependent. 
-            perf_data = perf.create_perf_data("hybrid", model_dataset, self.transformers, 'test', is_ki=self.params.is_ki, ki_convert_ratio=self.params.ki_convert_ratio, transformed=False)
+            perf_data = perf.create_perf_data("hybrid", model_dataset, 'test', is_ki=self.params.is_ki, ki_convert_ratio=self.params.ki_convert_ratio)
         else:
-            perf_data = perf.create_perf_data(self.params.prediction_type, model_dataset, self.transformers, 'test', transformed=False)
+            perf_data = perf.create_perf_data(self.params.prediction_type, model_dataset, 'test')
         test_dset = model_dataset.test_dset
 
         test_preds, test_stds = self.generate_predictions(test_dset)
-        _ = perf_data.accumulate_preds(test_preds, test_dset.ids, test_stds, fold=fold)
+        _ = perf_data.accumulate_preds(test_preds, test_dset.ids, test_stds)
         return perf_data
 
     # ****************************************************************************************
@@ -558,17 +554,13 @@ def get_full_dataset_perf_data(self, model_dataset, fold):
 
         # Create a PerfData object, which knows how to format the prediction results in the structure
         # expected by the model tracker.
-
-        # We pass transformed=False to indicate that the preds and uncertainties we get from
-        # generate_predictions are already untransformed, so that perf_data.get_prediction_results()
-        # doesn't untransform them again.
         if hasattr(self.transformers['final'][0], "ishybrid"):
             # indicate that we are training a hybrid model
-            perf_data = perf.create_perf_data("hybrid", model_dataset, self.transformers, 'full', is_ki=self.params.is_ki, ki_convert_ratio=self.params.ki_convert_ratio, transformed=False)
+            perf_data = perf.create_perf_data("hybrid", model_dataset, 'full', is_ki=self.params.is_ki, ki_convert_ratio=self.params.ki_convert_ratio)
         else:
-            perf_data = perf.create_perf_data(self.params.prediction_type, model_dataset, self.transformers, 'full', transformed=False)
+            perf_data = perf.create_perf_data(self.params.prediction_type, model_dataset, 'full')
 
         full_preds, full_stds = self.generate_predictions(model_dataset.dataset)
-        _ = perf_data.accumulate_preds(full_preds, model_dataset.dataset.ids, full_stds, fold)
+        _ = perf_data.accumulate_preds(full_preds, model_dataset.dataset.ids, full_stds)
         return perf_data
 
     # ****************************************************************************************
@@ -901,12 +893,9 @@ def train_kfold_cv(self, pipeline):
                               subsets={'train':'train_valid', 'valid':'valid', 'test':'test'},
                               prediction_type=self.params.prediction_type,
                               model_dataset=pipeline.data,
-                              production=self.params.production,
-                              transformers=self.transformers)
-        em.set_make_pred(lambda x: self.model.predict(x, []))
-        em.on_new_best_valid(lambda : 1+1) # does not need to take any action
+                              production=self.params.production)
 
-        test_dset = pipeline.data.test_dset
+        em.on_new_best_valid(lambda : 1+1) # does not need to take any action
 
         # Train a separate model for each fold
         models = []
@@ -915,21 +904,28 @@ def train_kfold_cv(self, pipeline):
 
         for ei in LCTimerKFoldIterator(self.params, pipeline, self.log):
            # Create PerfData structures that are only used within loop to compute metrics during initial training
-            train_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'train')
-            test_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'test')
+            train_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, 'train')
+            test_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, 'test')
 
            for k in range(num_folds):
self.model = models[k]
 
                 train_dset, valid_dset = pipeline.data.train_valid_dsets[k]
+                train_dset = self.transform_dataset(train_dset, fold=k)
+                valid_dset = self.transform_dataset(valid_dset, fold=k)
+                test_dset = self.transform_dataset(pipeline.data.test_dset, fold=k)
 
                 # We turn off automatic checkpointing - we only want to save a checkpoints for the final model.
                 self.model.fit(train_dset, nb_epoch=1, checkpoint_interval=0, restore=False)
-                train_pred = self.model.predict(train_dset, [])
-                test_pred = self.model.predict(test_dset, [])
-
-                train_perf = train_perf_data.accumulate_preds(train_pred, train_dset.ids, fold=k)
-                test_perf = test_perf_data.accumulate_preds(test_pred, test_dset.ids, fold=k)
-
-                valid_perf = em.accumulate(ei, subset='valid', dset=valid_dset, fold=k)
+                train_pred = self.model.predict(train_dset, self.transformers[k])
+                test_pred = self.model.predict(test_dset, self.transformers[k])
+
+                train_perf = train_perf_data.accumulate_preds(train_pred, train_dset.ids)
+                test_perf = test_perf_data.accumulate_preds(test_pred, test_dset.ids)
+
+                # Update the make_pred function to use the current fold's transformers
+                def make_pred(x):
+                    return self.model.predict(x, self.transformers[k])
+                em.set_make_pred(make_pred)
+                valid_perf = em.accumulate(ei, subset='valid', dset=valid_dset)
                 self.log.info("Fold %d, epoch %d: training %s = %.3f, validation %s = %.3f, test %s = %.3f" % (
                               k, ei, pipeline.metric_type, train_perf, pipeline.metric_type, valid_perf,
                               pipeline.metric_type, test_perf))
@@ -945,7 +941,12 @@ def train_kfold_cv(self, pipeline):
 
         # Train a new model for best_epoch epochs on the combined training/validation set. Compute the training and test
         # set metrics at each epoch.
-        fit_dataset = pipeline.data.combined_training_data()
+        fit_dataset = self.transform_dataset(pipeline.data.combined_training_data(), fold='final')
+        test_dset = self.transform_dataset(pipeline.data.test_dset, fold='final')
+        def make_pred(x):
+            return self.model.predict(x, self.transformers['final'])
+        em.set_make_pred(make_pred)
+
         retrain_start = time.time()
         self.model = self.recreate_model()
         self.log.info(f"Best epoch was {self.best_epoch}, retraining with combined training/validation set")
@@ -1000,19 +1001,22 @@ def train_with_early_stopping(self, pipeline):
         em = perf.EpochManager(self,
                                prediction_type=self.params.prediction_type,
                                model_dataset=pipeline.data,
-                               production=self.params.production,
-                               transformers=self.transformers)
-        em.set_make_pred(lambda x: self.model.predict(x, []))
+                               production=self.params.production)
+        def make_pred(dset):
+            return self.model.predict(dset, self.transformers['final'])
+        em.set_make_pred(make_pred)
         em.on_new_best_valid(lambda : self.model.save_checkpoint())
 
-        test_dset = pipeline.data.test_dset
         train_dset, valid_dset = pipeline.data.train_valid_dsets[0]
+        train_dset = self.transform_dataset(train_dset, 'final')
+        valid_dset = self.transform_dataset(valid_dset, 'final')
+        test_dset = self.transform_dataset(pipeline.data.test_dset, 'final')
 
         for ei in LCTimerIterator(self.params, pipeline, self.log):
             # Train the model for one epoch. We turn off automatic checkpointing, so the last checkpoint
             # saved will be the one we created intentionally when we reached a new best validation score.
self.model.fit(train_dset, nb_epoch=1, checkpoint_interval=0) train_perf, valid_perf, test_perf = em.update_epoch(ei, - train_dset=train_dset, valid_dset=valid_dset, test_dset=test_dset, fold='final') + train_dset=train_dset, valid_dset=valid_dset, test_dset=test_dset) self.log.info("Epoch %d: training %s = %.3f, validation %s = %.3f, test %s = %.3f" % ( ei, pipeline.metric_type, train_perf, pipeline.metric_type, valid_perf, @@ -1447,9 +1451,11 @@ def train(self, pipeline): opt, ei, self.model_dict)) train_dset, valid_dset = pipeline.data.train_valid_dsets[0] + train_dset = self.transform_dataset(train_dset, 'final') + valid_dset = self.transform_dataset(valid_dset, 'final') + test_dset = self.transform_dataset(pipeline.data.test_dset, 'final') if len(pipeline.data.train_valid_dsets) > 1: raise Exception("Currently the hybrid model doesn't support K-fold cross validation splitting.") - test_dset = pipeline.data.test_dset train_data, valid_data = self.train_valid_dsets[0] for ei in LCTimerIterator(self.params, pipeline, self.log): # Train the model for one epoch. We turn off automatic checkpointing, so the last checkpoint @@ -1657,25 +1663,27 @@ def train(self, pipeline): self.data = pipeline.data self.best_epoch = None - self.train_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers,'train') - self.valid_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'valid') - self.test_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'test') + self.train_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, 'train') + self.valid_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, 'valid') + self.test_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, 'test') - test_dset = pipeline.data.test_dset num_folds = len(pipeline.data.train_valid_dsets) for k in range(num_folds): train_dset, valid_dset = pipeline.data.train_valid_dsets[k] + train_dset = self.transform_dataset(train_dset, fold=k) + valid_dset = self.transform_dataset(valid_dset, fold=k) + test_dset = self.transform_dataset(pipeline.data.test_dset, fold=k) self.model.fit(train_dset) - train_pred = self.model.predict(train_dset, []) - train_perf = self.train_perf_data.accumulate_preds(train_pred, train_dset.ids, fold=k) + train_pred = self.model.predict(train_dset, self.transformers[k]) + train_perf = self.train_perf_data.accumulate_preds(train_pred, train_dset.ids) - valid_pred = self.model.predict(valid_dset, []) - valid_perf = self.valid_perf_data.accumulate_preds(valid_pred, valid_dset.ids, fold=k) + valid_pred = self.model.predict(valid_dset, self.transformers[k]) + valid_perf = self.valid_perf_data.accumulate_preds(valid_pred, valid_dset.ids) - test_pred = self.model.predict(test_dset, []) - test_perf = self.test_perf_data.accumulate_preds(test_pred, test_dset.ids, fold=k) + test_pred = self.model.predict(test_dset, self.transformers[k]) + test_perf = self.test_perf_data.accumulate_preds(test_pred, test_dset.ids) self.log.info("Fold %d: training %s = %.3f, validation %s = %.3f, test %s = %.3f" % ( k, pipeline.metric_type, train_perf, pipeline.metric_type, valid_perf, pipeline.metric_type, test_perf)) @@ -1692,6 +1700,7 @@ def train(self, pipeline): if num_folds > 1: # For k-fold CV, retrain on the combined training and validation sets fit_dataset = self.data.combined_training_data() + fit_dataset = 
self.transform_dataset(fit_dataset, fold='final') self.model.fit(fit_dataset) self.model_save() # The best model is just the single RF training run. @@ -1898,7 +1907,7 @@ def generate_predictions(self, dataset): pred, std = None, None self.log.info("Evaluating current model") - pred = self.model.predict(dataset, self.transformers) + pred = self.model.predict(dataset, self.transformers['final']) ncmpds = pred.shape[0] pred = pred.reshape((ncmpds,1,-1)) @@ -1908,7 +1917,7 @@ def generate_predictions(self, dataset): ## s.d. from forest if self.params.transformers and self.transformers is not None: RF_per_tree_pred = [dc.trans.undo_transforms( - tree.predict(dataset.X), self.transformers) for tree in rf_model.estimators_] + tree.predict(dataset.X), self.transformers['final']) for tree in rf_model.estimators_] else: RF_per_tree_pred = [tree.predict(dataset.X) for tree in rf_model.estimators_] @@ -2076,25 +2085,27 @@ def train(self, pipeline): self.data = pipeline.data self.best_epoch = None - self.train_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers,'train') - self.valid_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'valid') - self.test_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, self.transformers, 'test') + self.train_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, 'train') + self.valid_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, 'valid') + self.test_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, 'test') - test_dset = pipeline.data.test_dset num_folds = len(pipeline.data.train_valid_dsets) for k in range(num_folds): train_dset, valid_dset = pipeline.data.train_valid_dsets[k] + train_dset = self.transform_dataset(train_dset, fold=k) + valid_dset = self.transform_dataset(valid_dset, fold=k) + test_dset = self.transform_dataset(pipeline.data.test_dset, fold=k) self.model.fit(train_dset) - train_pred = self.model.predict(train_dset, []) - train_perf = self.train_perf_data.accumulate_preds(train_pred, train_dset.ids, fold=k) + train_pred = self.model.predict(train_dset, self.transformers[k]) + train_perf = self.train_perf_data.accumulate_preds(train_pred, train_dset.ids) - valid_pred = self.model.predict(valid_dset, []) - valid_perf = self.valid_perf_data.accumulate_preds(valid_pred, valid_dset.ids, fold=k) + valid_pred = self.model.predict(valid_dset, self.transformers[k]) + valid_perf = self.valid_perf_data.accumulate_preds(valid_pred, valid_dset.ids) - test_pred = self.model.predict(test_dset, []) - test_perf = self.test_perf_data.accumulate_preds(test_pred, test_dset.ids, fold=k) + test_pred = self.model.predict(test_dset, self.transformers[k]) + test_perf = self.test_perf_data.accumulate_preds(test_pred, test_dset.ids) self.log.info("Fold %d: training %s = %.3f, validation %s = %.3f, test %s = %.3f" % ( k, pipeline.metric_type, train_perf, pipeline.metric_type, valid_perf, pipeline.metric_type, test_perf)) @@ -2110,6 +2121,7 @@ def train(self, pipeline): if num_folds > 1: # For k-fold CV, retrain on the combined training and validation sets fit_dataset = self.data.combined_training_data() + fit_dataset = self.transform_dataset(fit_dataset, fold='final') self.model.fit(fit_dataset) self.model_save() # The best model is just the single xgb training run. 
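The model_wrapper changes above all follow one pattern: transform each fold's datasets with that fold's transformers immediately before fitting, then pass the same transformers to predict() so DeepChem undoes them and predictions come back in the original response space. A condensed sketch of that pattern, with hypothetical model/data stand-ins (the real loops are in train_kfold_cv and the RF/xgboost train methods):

    # Sketch only: `model`, `data` and `transformers` are stand-ins; in AMPL,
    # `transformers` is the dict built by create_transformers, keyed by fold
    # index plus 'final'.
    def transform_dataset(dataset, transformers, fold):
        # Apply the fold-specific transformers in order.
        for t in transformers[fold]:
            dataset = t.transform(dataset)
        return dataset

    def run_folds(model, data, transformers, num_folds):
        for k in range(num_folds):
            train_dset, valid_dset = data.train_valid_dsets[k]
            train_dset = transform_dataset(train_dset, transformers, fold=k)
            valid_dset = transform_dataset(valid_dset, transformers, fold=k)
            model.fit(train_dset)
            # Passing the fold's transformers makes predict() undo them, so
            # downstream perf code sees untransformed values.
            valid_pred = model.predict(valid_dset, transformers[k])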
diff --git a/atomsci/ddm/pipeline/perf_data.py b/atomsci/ddm/pipeline/perf_data.py index 48271d32..6d0590da 100644 --- a/atomsci/ddm/pipeline/perf_data.py +++ b/atomsci/ddm/pipeline/perf_data.py @@ -11,10 +11,6 @@ from sklearn.metrics import accuracy_score, matthews_corrcoef, cohen_kappa_score, log_loss, balanced_accuracy_score from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error -from atomsci.ddm.pipeline import transformations as trans - - - # ****************************************************************************************************************************** def rms_error(y_real, y_pred): """Calculates the root mean squared error. Score function used for model selection. @@ -75,7 +71,7 @@ def negative_predictive_value(y_real, y_pred): binary_class_only = {'npv'} # ****************************************************************************************************************************** -def create_perf_data(prediction_type, model_dataset, transformers, subset, **kwargs): +def create_perf_data(prediction_type, model_dataset, subset, **kwargs): """Factory function that creates the right kind of PerfData object for the given subset, prediction_type (classification or regression) and split strategy (k-fold or train/valid/test). @@ -84,8 +80,6 @@ def create_perf_data(prediction_type, model_dataset, transformers, subset, **kwa model_dataset (ModelDataset): Object representing the full dataset. - transformers (list): A list of transformer objects. - subset (str): Label in ['train', 'valid', 'test', 'full'], indicating the type of subset of dataset for tracking predictions **kwargs: Additional PerfData subclass arguments @@ -104,20 +98,20 @@ def create_perf_data(prediction_type, model_dataset, transformers, subset, **kwa if prediction_type == 'regression': if subset == 'full' or split_strategy == 'train_valid_test': # Called simple because no need to track compound IDs across multiple training folds - return SimpleRegressionPerfData(model_dataset, transformers, subset, **kwargs) + return SimpleRegressionPerfData(model_dataset, subset, **kwargs) elif split_strategy == 'k_fold_cv': - return KFoldRegressionPerfData(model_dataset, transformers, subset, **kwargs) + return KFoldRegressionPerfData(model_dataset, subset, **kwargs) else: raise ValueError('Unknown split_strategy %s' % split_strategy) elif prediction_type == 'classification': if subset == 'full' or split_strategy == 'train_valid_test': - return SimpleClassificationPerfData(model_dataset, transformers, subset, **kwargs) + return SimpleClassificationPerfData(model_dataset, subset, **kwargs) elif split_strategy == 'k_fold_cv': - return KFoldClassificationPerfData(model_dataset, transformers, subset, **kwargs) + return KFoldClassificationPerfData(model_dataset, subset, **kwargs) else: raise ValueError('Unknown split_strategy %s' % split_strategy) elif prediction_type == "hybrid": - return SimpleHybridPerfData(model_dataset, transformers, subset, **kwargs) + return SimpleHybridPerfData(model_dataset, subset, **kwargs) else: raise ValueError('Unknown prediction type %s' % prediction_type) @@ -132,21 +126,21 @@ def __init__(self, model_dataset, subset): """Initialize any attributes that are common to all PerfData subclasses""" # **************************************************************************************** - def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): + def accumulate_preds(self, predicted_vals, ids, pred_stds=None): """Raises: NotImplementedError: The method is 
implemented by subclasses """ raise NotImplementedError # **************************************************************************************** - def get_pred_values(self, fold): + def get_pred_values(self): """Raises: NotImplementedError: The method is implemented by subclasses """ raise NotImplementedError # **************************************************************************************** - def get_real_values(self, fold, ids=None): + def get_real_values(self, ids=None): """Raises: NotImplementedError: The method is implemented by subclasses """ @@ -170,7 +164,7 @@ def compute_perf_metrics(self, per_task=False): raise NotImplementedError # **************************************************************************************** - def get_prediction_results(self, fold): + def get_prediction_results(self): """Raises: NotImplementedError: The method is implemented by subclasses """ @@ -217,14 +211,14 @@ def __init__(self, model_dataset, subset): self.weights = None # **************************************************************************************** - def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): + def accumulate_preds(self, predicted_vals, ids, pred_stds=None): """Raises: NotImplementedError: The method is implemented by subclasses """ raise NotImplementedError # **************************************************************************************** - def get_pred_values(self, fold): + def get_pred_values(self): """Raises: NotImplementedError: The method is implemented by subclasses """ @@ -257,7 +251,7 @@ def model_choice_score(self, score_type='r2'): """ ids, pred_vals, stds = self.get_pred_values() - real_vals = self.get_real_values('train_valid', ids) + real_vals = self.get_real_values(ids=ids) weights = self.get_weights(ids) scores = [] for i in range(self.num_tasks): @@ -274,7 +268,7 @@ def model_choice_score(self, score_type='r2'): # **************************************************************************************** # class RegressionPerfData - def get_prediction_results(self, fold): + def get_prediction_results(self): """Returns a dictionary of performance metrics for a regression model. The dictionary values should contain only primitive Python types, so that it can be easily JSONified. @@ -303,8 +297,8 @@ def get_prediction_results(self, fold): # and then averaging the metrics. If people start asking for SDs of MAE and RMSE scores over folds, # we'll change the code to compute all metrics the same way. 
- (ids, pred_vals, pred_stds) = self.get_pred_values(fold=fold) - real_vals = self.get_real_values(ids, fold=fold) + (ids, pred_vals, pred_stds) = self.get_pred_values() + real_vals = self.get_real_values(ids) weights = self.get_weights(ids) mae_scores = [] rms_scores = [] @@ -406,7 +400,7 @@ def __init__(self, model_dataset, subset): self.weights = None # **************************************************************************************** - def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): + def accumulate_preds(self, predicted_vals, ids, pred_stds=None): """Raises: NotImplementedError: The method is implemented by subclasses """ @@ -442,7 +436,7 @@ def model_choice_score(self, score_type='r2'): """ ids, pred_vals, stds = self.get_pred_values() - real_vals = self.get_real_values(ids) + real_vals = self.get_real_values(ids=ids) weights = self.get_weights(ids) scores = [] @@ -477,7 +471,7 @@ def model_choice_score(self, score_type='r2'): # **************************************************************************************** # class HybridPerfData - def get_prediction_results(self, fold): + def get_prediction_results(self): """Returns a dictionary of performance metrics for a regression model. The dictionary values should contain only primitive Python types, so that it can be easily JSONified. @@ -507,7 +501,7 @@ def get_prediction_results(self, fold): # we'll change the code to compute all metrics the same way. (ids, pred_vals, pred_stds) = self.get_pred_values() - real_vals = self.get_real_values(ids) + real_vals = self.get_real_values(ids=ids) weights = self.get_weights(ids) mae_scores = [] rms_scores = [] @@ -631,7 +625,7 @@ def __init__(self, model_dataset, subset): self.weights = None # **************************************************************************************** - def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): + def accumulate_preds(self, predicted_vals, ids, pred_stds=None): """Raises: NotImplementedError: The method is implemented by subclasses """ @@ -707,7 +701,7 @@ def model_choice_score(self, score_type='roc_auc'): # **************************************************************************************** # class ClassificationPerfData - def get_prediction_results(self, fold): + def get_prediction_results(self): """Returns a dictionary of performance metrics for a classification model. The dictionary values will contain only primitive Python types, so that it can be easily JSONified. 
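With predictions now arriving untransformed, the scoring code can compare them directly to the stored untransformed responses. A runnable sketch, on synthetic arrays, of the per-task, weight-masked scoring pattern shared by the PerfData subclasses:

    import numpy as np
    from sklearn.metrics import r2_score

    real_vals = np.array([[1.0, 2.0], [2.0, 0.0], [3.0, 4.0]])
    pred_vals = np.array([[1.1, 2.2], [1.9, 0.0], [2.8, 3.7]])
    weights   = np.array([[1.0, 1.0], [1.0, 0.0], [1.0, 1.0]])  # 0 marks missing data

    scores = []
    for i in range(real_vals.shape[1]):
        # Mask out zero-weight (missing) compounds for this task before scoring.
        nzrows = np.where(weights[:, i] != 0)[0]
        scores.append(r2_score(real_vals[nzrows, i], pred_vals[nzrows, i]))
    print(float(np.mean(scores)))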
@@ -722,7 +716,7 @@ def get_prediction_results(self, fold): pred_results = {} (ids, pred_classes, class_probs, prob_stds) = self.get_pred_values() - real_vals = self.get_real_values(ids) + real_vals = self.get_real_values(ids=ids) weights = self.get_weights(ids) if self.num_classes > 2: real_classes = np.argmax(real_vals, axis=2) @@ -882,26 +876,20 @@ class KFoldRegressionPerfData(RegressionPerfData): folds (int): Initialized at zero, flag for determining which k-fold is being assessed - transformers (list of Transformer objects): from input arguments - real_vals (dict): The dictionary containing the origin response column values """ # **************************************************************************************** # class KFoldRegressionPerfData - def __init__(self, model_dataset, transformers, subset, transformed=True): + def __init__(self, model_dataset, subset): """# Initialize any attributes that are common to all KFoldRegressionPerfData subclasses Args: model_dataset (ModelDataset object): contains the dataset and related methods - transformers (list of transformer objects): contains the list of transformers used to transform the dataset - subset (str): Label in ['train', 'valid', 'test', 'full'], indicating the type of subset of dataset for tracking predictions - transformed (bool): True if values to be passed to accumulate preds function are transformed values - Side effects: Sets the following attributes of KFoldRegressionPerfData: subset (str): Label of the type of subset of dataset for tracking predictions @@ -914,8 +902,6 @@ def __init__(self, model_dataset, transformers, subset, transformed=True): folds (int): Initialized at zero, flag for determining which k-fold is being assessed - transformers (list of Transformer objects): from input arguments - real_vals (dict): The dictionary containing the origin response column values """ @@ -932,20 +918,13 @@ def __init__(self, model_dataset, transformers, subset, transformed=True): self.folds = 0 self.perf_metrics = [] self.model_score = None - # Want predictions and real values to be in the same space, either transformed or untransformed - if transformed: - # Predictions passed to accumulate_preds() will be transformed - self.real_vals, self.weights = model_dataset.get_subset_responses_and_weights(self.subset, []) - self.transformers = transformers - else: - # If these were never transformed, transformers will be [], which is fine with undo_transforms - self.real_vals, self.weights = model_dataset.get_subset_responses_and_weights(self.subset, transformers) - self.transformers = [] + # Want predictions and real values to be in the same space, untransformed + self.real_vals, self.weights = model_dataset.get_subset_responses_and_weights(self.subset) # **************************************************************************************** # class KFoldRegressionPerfData - def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): + def accumulate_preds(self, predicted_vals, ids, pred_stds=None): """Add training, validation or test set predictions from the current fold to the data structure where we keep track of them. 
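The accumulate_preds hunk below stacks one (1, ntasks) row per fold for each compound; get_pred_values() then reduces the stack. A small sketch with synthetic values of what the dictionary holds and how the fold mean and standard deviation fall out:

    import numpy as np

    ntasks = 1
    pred_vals = {'c1': np.empty((0, ntasks))}  # one stacked row per fold

    for fold_pred in (1.2, 1.4, 1.0):
        pred_vals['c1'] = np.concatenate(
            [pred_vals['c1'], np.array([[fold_pred]])], axis=0)

    # Per-compound mean over folds, plus fold-to-fold spread when folds > 1.
    print(pred_vals['c1'].mean(axis=0), pred_vals['c1'].std(axis=0))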
@@ -989,15 +968,13 @@ def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): self.pred_vals[id] = np.concatenate([self.pred_vals[id], predicted_vals[i,:].reshape((1,-1))], axis=0) self.folds += 1 - pred_vals = dc.trans.undo_transforms(predicted_vals, self.transformers[fold]) - - real_vals = self.get_real_values(ids) + real_vals = self.get_real_values(ids=ids) weights = self.get_weights(ids) scores = [] for i in range(self.num_tasks): nzrows = np.where(weights[:,i] != 0)[0] task_real_vals = np.squeeze(real_vals[nzrows,i]) - task_pred_vals = np.squeeze(pred_vals[nzrows,i]) + task_pred_vals = np.squeeze(predicted_vals[nzrows,i]) scores.append(regr_score_func['r2'](task_real_vals, task_pred_vals)) self.perf_metrics.append(np.array(scores)) return float(np.mean(scores)) @@ -1005,8 +982,8 @@ def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): # **************************************************************************************** # class KFoldRegressionPerfData - def get_pred_values(self, fold): - """Returns the predicted values accumulated over training, with any transformations undone. + def get_pred_values(self): + """Returns the predicted values accumulated over training. If self.subset is 'train' or 'test', the function will return averages over the training folds for each compound along with standard deviations when there are predictions from multiple folds. Otherwise, returns a single predicted value for each compound. @@ -1022,38 +999,36 @@ def get_pred_values(self, fold): """ ids = sorted(self.pred_vals.keys()) if self.subset in ['train', 'test', 'train_valid']: - rawvals = np.concatenate([self.pred_vals[id].mean(axis=0, keepdims=True).reshape((1,-1)) for id in ids]) - vals = dc.trans.undo_transforms(rawvals, self.transformers[fold]) + vals = np.concatenate([self.pred_vals[id].mean(axis=0, keepdims=True).reshape((1,-1)) for id in ids]) if self.folds > 1: - stds = dc.trans.undo_transforms(np.concatenate([self.pred_vals[id].std(axis=0, keepdims=True).reshape((1,-1)) - for id in ids]), self.transformers[fold]) + stds = np.concatenate([self.pred_vals[id].std(axis=0, keepdims=True).reshape((1,-1)) + for id in ids]) else: stds = None else: - rawvals = np.concatenate([self.pred_vals[id].reshape((1,-1)) for id in ids], axis=0) - vals = dc.trans.undo_transforms(rawvals, self.transformers[fold]) + vals = np.concatenate([self.pred_vals[id].reshape((1,-1)) for id in ids], axis=0) stds = None return (ids, vals, stds) # **************************************************************************************** # class KFoldRegressionPerfData - def get_real_values(self, fold, ids=None): - """Returns the real dataset response values, with any transformations undone, as an (ncmpds, ntasks) array + def get_real_values(self, ids=None): + """Returns the real dataset response values, as an (ncmpds, ntasks) array in the same ID order as get_pred_values() (unless ids is specified). Args: ids (list of str): Optional list of compound IDs to return values for. Returns: - np.array (ncmpds, ntasks) of the real dataset response values, with any transformations undone, in the same + np.array (ncmpds, ntasks) of the real dataset response values, in the same ID order as get_pred_values(). 
""" if ids is None: ids = sorted(self.pred_vals.keys()) real_vals = np.concatenate([self.real_vals[id].reshape((1,-1)) for id in ids], axis=0) - return dc.trans.undo_transforms(real_vals, self.transformers[fold]) + return real_vals # **************************************************************************************** @@ -1117,7 +1092,6 @@ class KFoldClassificationPerfData(ClassificationPerfData): num_tasks (int): The number of tasks in the dataset pred-vals (dict): The dictionary of prediction results folds (int): Initialized at zero, flag for determining which k-fold is being assessed - transformers (list of Transformer objects): from input arguments real_vals (dict): The dictionary containing the origin response column values class_names (np.array): Assumes the classes are of deepchem index type (e.g. 0,1,2,...) num_classes (int): The number of classes to predict on @@ -1125,21 +1099,17 @@ class KFoldClassificationPerfData(ClassificationPerfData): # **************************************************************************************** # class KFoldClassificationPerfData - def __init__(self, model_dataset, transformers, subset, predict_probs=True, transformed=True): + def __init__(self, model_dataset, subset, predict_probs=True): """Initialize any attributes that are common to all KFoldClassificationPerfData subclasses Args: model_dataset (ModelDataset object): contains the dataset and related methods - transformers (list of transformer objects): contains the list of transformers used to transform the dataset - subset (str): Label in ['train', 'valid', 'test', 'full'], indicating the type of subset of dataset for tracking predictions predict_probs (bool): True if using classifier supports probabilistic predictions, False otherwise - transformed (bool): True if values to be passed to accumulate preds function are transformed values - Raises: ValueError if subset not in ['train','valid','test'], unsupported dataset subset @@ -1157,8 +1127,6 @@ def __init__(self, model_dataset, transformers, subset, predict_probs=True, tran folds (int): Initialized at zero, flag for determining which k-fold is being assessed - transformers (list of Transformer objects): from input arguments - real_vals (dict): The dictionary containing the origin response column values in one-hot encoding class_names (np.array): Assumes the classes are of deepchem index type (e.g. 0,1,2,...) 
@@ -1187,7 +1155,7 @@ def __init__(self, model_dataset, transformers, subset, predict_probs=True, tran
         self.num_classes = len(set(model_dataset.dataset.y.flatten()))
 
         self.pred_vals = dict([(id, np.empty((0, self.num_tasks, self.num_classes), dtype=np.float32)) for id in dataset.ids])
-        real_vals, self.weights = model_dataset.get_subset_responses_and_weights(self.subset, [])
+        real_vals, self.weights = model_dataset.get_subset_responses_and_weights(self.subset)
         self.real_classes = real_vals
         # Change real_vals to one-hot encoding
         if self.num_classes > 2:
@@ -1201,16 +1169,11 @@ def __init__(self, model_dataset, transformers, subset, predict_probs=True, tran
         self.folds = 0
         self.perf_metrics = []
         self.model_score = None
-        if transformed:
-            # Predictions passed to accumulate_preds() will be transformed
-            self.transformers = transformers
-        else:
-            self.transformers = []
 
 
     # ****************************************************************************************
     # class KFoldClassificationPerfData
-    def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None):
+    def accumulate_preds(self, predicted_vals, ids, pred_stds=None):
         """Add training, validation or test set predictions from the current fold to the data structure
         where we keep track of them.
 
@@ -1234,7 +1197,7 @@ def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None):
         for i, id in enumerate(ids):
             self.pred_vals[id] = np.concatenate([self.pred_vals[id], class_probs[i,:,:].reshape((1,self.num_tasks,-1))], axis=0)
         self.folds += 1
-        real_vals = self.get_real_values(ids)
+        real_vals = self.get_real_values(ids=ids)
         weights = self.get_weights(ids)
         # Break out different predictions for each task, with zero-weight compounds masked out, and compute per-task metrics
         scores = []
@@ -1243,25 +1206,21 @@ def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None):
             if self.num_classes > 2:
                 # If more than 2 classes, real_vals is indicator matrix (one-hot encoded).
                 task_real_vals = np.squeeze(real_vals[nzrows,i,:])
-                task_class_probs = dc.trans.undo_transforms(
-                    np.squeeze(class_probs[nzrows,i,:]),
-                    self.transformers[fold])
+                task_class_probs = np.squeeze(class_probs[nzrows,i,:])
                 scores.append(roc_auc_score(task_real_vals, task_class_probs, average='macro'))
             else:
                 # For binary classifier, sklearn metrics functions are expecting single array of 1s and 0s for real_vals_list,
                 # and class_probs for class 1 only.
                 task_real_vals = np.squeeze(real_vals[nzrows,i])
-                task_class_probs = dc.trans.undo_transforms(
-                    np.squeeze(class_probs[nzrows,i,1]),
-                    self.transformers[fold])
+                task_class_probs = np.squeeze(class_probs[nzrows,i,1])
                 scores.append(roc_auc_score(task_real_vals, task_class_probs))
         self.perf_metrics.append(np.array(scores))
         return float(np.mean(scores))
 
     # ****************************************************************************************
     # class KFoldClassificationPerfData
-    def get_pred_values(self, fold):
-        """Returns the predicted values accumulated over training, with any transformations undone. If self.subset
+    def get_pred_values(self):
+        """Returns the predicted values accumulated over training. If self.subset
         is 'train', 'train_valid' or 'test', the function will return the means and standard deviations of the class probabilities
         over the training folds for each compound, for each task. Otherwise, returns a single set of predicted probabilites
         for each validation set compound. For all subsets, returns the compound IDs and the most probable classes for each task.
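The same accumulate-then-reduce idea applies to class probabilities: each fold appends a (1, ntasks, nclasses) slab per compound, and get_pred_values() averages the slabs before taking an argmax. A sketch with synthetic shapes:

    import numpy as np

    ntasks, nclasses = 1, 2
    pred_vals = {'c1': np.empty((0, ntasks, nclasses), dtype=np.float32)}

    def accumulate(cmpd_id, class_probs):
        # Append one (1, ntasks, nclasses) slab of fold predictions.
        pred_vals[cmpd_id] = np.concatenate(
            [pred_vals[cmpd_id], class_probs.reshape((1, ntasks, nclasses))], axis=0)

    accumulate('c1', np.array([[0.3, 0.7]]))   # fold 0
    accumulate('c1', np.array([[0.4, 0.6]]))   # fold 1
    mean_probs = pred_vals['c1'].mean(axis=0)  # (ntasks, nclasses)
    print(mean_probs, mean_probs.argmax(axis=1))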
@@ -1279,16 +1238,12 @@ def get_pred_values(self, fold): """ ids = sorted(self.pred_vals.keys()) if self.subset in ['train', 'test', 'train_valid']: - #class_probs = np.concatenate([dc.trans.undo_transforms(self.pred_vals[id], self.transformers).mean(axis=0, keepdims=True) - # for id in ids], axis=0) - #prob_stds = np.concatenate([dc.trans.undo_transforms(self.pred_vals[id], self.transformers).std(axis=0, keepdims=True) - # for id in ids], axis=0) - class_probs = dc.trans.undo_transforms(np.concatenate([self.pred_vals[id].mean(axis=0, keepdims=True) - for id in ids], axis=0), self.transformers[fold]) - prob_stds = dc.trans.undo_transforms(np.concatenate([self.pred_vals[id].std(axis=0, keepdims=True) - for id in ids], axis=0), self.transformers[fold]) + class_probs = np.concatenate([self.pred_vals[id].mean(axis=0, keepdims=True) + for id in ids], axis=0) + prob_stds = np.concatenate([self.pred_vals[id].std(axis=0, keepdims=True) + for id in ids], axis=0) else: - class_probs = np.concatenate([dc.trans.undo_transforms(self.pred_vals[id], self.transformers[fold]) for id in ids], axis=0) + class_probs = np.concatenate([self.pred_vals[id] for id in ids], axis=0) prob_stds = None pred_classes = np.argmax(class_probs, axis=2) return (ids, pred_classes, class_probs, prob_stds) @@ -1381,27 +1336,21 @@ class SimpleRegressionPerfData(RegressionPerfData): folds (int): Initialized at zero, flag for determining which k-fold is being assessed - transformers (list of Transformer objects): from input arguments - real_vals (dict): The dictionary containing the origin response column values """ # **************************************************************************************** # class SimpleRegressionPerfData - def __init__(self, model_dataset, transformers, subset, transformed=True): + def __init__(self, model_dataset, subset): """Initialize any attributes that are common to all SimpleRegressionPerfData subclasses Args: model_dataset (ModelDataset object): contains the dataset and related methods - transformers (list of transformer objects): contains the list of transformers used to transform the dataset - subset (str): Label in ['train', 'valid', 'test', 'full'], indicating the type of subset of dataset for tracking predictions - transformed (bool): True if values to be passed to accumulate preds function are transformed values - Raises: ValueError: if subset not in ['train','valid','test','full'], subset not supported @@ -1415,8 +1364,6 @@ def __init__(self, model_dataset, transformers, subset, transformed=True): pred_vals (dict): The dictionary of prediction results - transformers (list of Transformer objects): from input arguments - real_vals (dict): The dictionary containing the origin response column values """ @@ -1439,18 +1386,13 @@ def __init__(self, model_dataset, transformers, subset, transformed=True): self.pred_stds = None self.perf_metrics = [] self.model_score = None - if transformed: - # Predictions passed to accumulate_preds() will be transformed - self.transformers = transformers - self.real_vals = dataset.y - else: - self.real_vals = dc.trans.undo_transforms(dataset.y, transformers) - self.transformers = [] + + self.real_vals = model_dataset.get_untransformed_responses(dataset.ids) # **************************************************************************************** # class SimpleRegressionPerfData - def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): + def accumulate_preds(self, predicted_vals, ids, pred_stds=None): """Add training, validation or test set 
predictions to the data structure where we keep track of them. @@ -1469,8 +1411,8 @@ def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): self.pred_vals = self._reshape_preds(predicted_vals) if pred_stds is not None: self.pred_stds = self._reshape_preds(pred_stds) - pred_vals = dc.trans.undo_transforms(self.pred_vals, self.transformers[fold]) - real_vals = self.get_real_values(ids) + pred_vals = self.pred_vals + real_vals = self.get_real_values(ids=ids) weights = self.get_weights(ids) scores = [] for i in range(self.num_tasks): @@ -1484,8 +1426,8 @@ def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): # **************************************************************************************** # class SimpleRegressionPerfData - def get_pred_values(self, fold): - """Returns the predicted values accumulated over training, with any transformations undone. Returns + def get_pred_values(self): + """Returns the predicted values accumulated over training. Returns a tuple (ids, values, stds), where ids is the list of compound IDs, values is a (ncmpds, ntasks) array of predictions, and stds is always None for this class. @@ -1497,33 +1439,28 @@ def get_pred_values(self, fold): stds (np.array): Contains (ncmpds, ntasks) array of prediction standard deviations """ - vals = dc.trans.undo_transforms(self.pred_vals, self.transformers[fold]) + vals = self.pred_vals stds = None if self.pred_stds is not None: stds = self.pred_stds - if len(self.transformers[fold]) == 1 and (isinstance(self.transformers[fold][0], dc.trans.NormalizationTransformer) or isinstance(self.transformers[fold][0],trans.NormalizationTransformerMissingData)): - # Untransform the standard deviations, if we can. This is a bit of a hack, but it works for - # NormalizationTransformer, since the standard deviations used to scale the data are - # stored in the transformer object. - y_stds = self.transformers[fold][0].y_stds.reshape((1,-1,1)) - stds = stds / y_stds + return (self.ids, vals, stds) # **************************************************************************************** # class SimpleRegressionPerfData - def get_real_values(self, fold, ids=None): - """Returns the real dataset response values, with any transformations undone, as an (ncmpds, ntasks) array + def get_real_values(self, ids=None): + """Returns the real dataset response values, as an (ncmpds, ntasks) array with compounds in the same ID order as in the return from get_pred_values(). Args: ids: Ignored for this class Returns: - np.array: Containing the real dataset response values with transformations undone. + np.array: Containing the real dataset response values. """ - return dc.trans.undo_transforms(self.real_vals, self.transformers[fold]) + return self.real_vals # **************************************************************************************** @@ -1581,8 +1518,6 @@ class SimpleClassificationPerfData(ClassificationPerfData): folds (int): Initialized at zero, flag for determining which k-fold is being assessed - transformers (list of Transformer objects): from input arguments - real_vals (dict): The dictionary containing the origin response column values class_names (np.array): Assumes the classes are of deepchem index type (e.g. 0,1,2,...) 
@@ -1593,21 +1528,17 @@ class SimpleClassificationPerfData(ClassificationPerfData): # **************************************************************************************** # class SimpleClassificationPerfData - def __init__(self, model_dataset, transformers, subset, predict_probs=True, transformed=True): + def __init__(self, model_dataset, subset, predict_probs=True): """Initialize any attributes that are common to all SimpleClassificationPerfData subclasses Args: model_dataset (ModelDataset object): contains the dataset and related methods - transformers (list of transformer objects): contains the list of transformers used to transform the dataset - subset (str): Label in ['train', 'valid', 'test', 'full'], indicating the type of subset of dataset for tracking predictions predict_probs (bool): True if using classifier supports probabilistic predictions, False otherwise - transformed (bool): True if values to be passed to accumulate preds function are transformed values - Raises: ValueError: if subset not in ['train','valid','test','full'], subset not supported @@ -1623,8 +1554,6 @@ def __init__(self, model_dataset, transformers, subset, predict_probs=True, tran pred_vals (dict): The dictionary of prediction results - transformers (list of Transformer objects): from input arguments - real_vals (dict): The dictionary containing the origin response column values num_classes (int): The number of classes to predict on @@ -1661,11 +1590,6 @@ def __init__(self, model_dataset, transformers, subset, predict_probs=True, tran self.ids = dataset.ids self.perf_metrics = [] self.model_score = None - if transformed: - # Predictions passed to accumulate_preds() will be transformed - self.transformers = transformers - else: - self.transformers = [] self.weights = dataset.w # TODO: Everything down to here is same as in SimpleRegressionPerfData.__init__. @@ -1674,20 +1598,20 @@ def __init__(self, model_dataset, transformers, subset, predict_probs=True, tran # DeepChem does not currently support arbitary class names in classification datasets; # enforce class indices (0, 1, 2, ...) here. - self.class_indeces = list(set(model_dataset.dataset.y.flatten())) + self.real_classes = model_dataset.get_untransformed_responses(dataset.ids) + self.class_indeces = list(set(self.real_classes.flatten())) self.num_classes = len(self.class_indeces) - self.real_classes = dataset.y # Convert true values to one-hot encoding if self.num_classes > 2: - self.real_vals = np.concatenate([dc.metrics.to_one_hot(dataset.y[:,j], self.num_classes).reshape(-1,1,self.num_classes) + self.real_vals = np.concatenate([dc.metrics.to_one_hot(self.real_classes[:,j], self.num_classes).reshape(-1,1,self.num_classes) for j in range(self.num_tasks)], axis=1) else: - self.real_vals = dataset.y.reshape((-1,self.num_tasks)) + self.real_vals = self.real_classes.reshape((-1,self.num_tasks)) # **************************************************************************************** # class SimpleClassificationPerfData - def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): + def accumulate_preds(self, predicted_vals, ids, pred_stds=None): """Add training, validation or test set predictions from the current dataset to the data structure where we keep track of them. 
@@ -1705,7 +1629,7 @@ def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None):
         class_probs = self.pred_vals = self._reshape_preds(predicted_vals)
         if pred_stds is not None:
             self.pred_stds = self._reshape_preds(pred_stds)
-        real_vals = self.get_real_values(ids)
+        real_vals = self.get_real_values(ids=ids)
         weights = self.get_weights(ids)
         # Break out different predictions for each task, with zero-weight compounds masked out, and compute per-task metrics
         scores = []
@@ -1714,25 +1638,22 @@ def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None):
             if self.num_classes > 2:
                 # If more than 2 classes, real_vals is indicator matrix (one-hot encoded).
                 task_real_vals = np.squeeze(real_vals[nzrows,i,:])
-                task_class_probs = dc.trans.undo_transforms(
-                    np.squeeze(class_probs[nzrows,i,:]),
-                    self.transformers[fold])
+                task_class_probs = np.squeeze(class_probs[nzrows,i,:])
+
                 scores.append(roc_auc_score(task_real_vals, task_class_probs, average='macro'))
             else:
                 # For binary classifier, sklearn metrics functions are expecting single array of 1s and 0s for real_vals_list,
                 # and class_probs for class 1 only.
                 task_real_vals = np.squeeze(real_vals[nzrows,i])
-                task_class_probs = dc.trans.undo_transforms(
-                    np.squeeze(class_probs[nzrows,i,1]),
-                    self.transformers[fold])
+                task_class_probs = np.squeeze(class_probs[nzrows,i,1])
                 scores.append(roc_auc_score(task_real_vals, task_class_probs))
         self.perf_metrics.append(np.array(scores))
         return float(np.mean(scores))
 
     # ****************************************************************************************
     # class SimpleClassificationPerfData
-    def get_pred_values(self, fold):
-        """Returns the predicted values accumulated over training, with any transformations undone.
+    def get_pred_values(self):
+        """Returns the predicted values accumulated over training.
         If self.subset is 'train', the function will average class probabilities over the k-1 folds
         in which each compound was part of the training set, and return the most probable class. Otherwise,
         there should be a single set of predicted probabilites for each validation or test set compound. Returns a tuple (ids,
@@ -1752,7 +1673,7 @@ class probability estimates.
            prob_stds (np.array): Contains (ncmpds, ntasks, nclasses) array of standard errors for the class probability estimates
        """
-        class_probs = dc.trans.undo_transforms(self.pred_vals, self.transformers[fold])
+        class_probs = self.pred_vals
         pred_classes = np.argmax(class_probs, axis=2)
         prob_stds = self.pred_stds
         return (self.ids, pred_classes, class_probs, prob_stds)
@@ -1830,22 +1751,18 @@ class SimpleHybridPerfData(HybridPerfData):

             folds (int): Initialized at zero, flag for determining which k-fold is being assessed

-            transformers (list of Transformer objects): from input arguments
-
             real_vals (dict): The dictionary containing the original response column values

     """
     # ****************************************************************************************
     # class SimpleHybridPerfData
-    def __init__(self, model_dataset, transformers, subset, is_ki, ki_convert_ratio=None, transformed=True):
+    def __init__(self, model_dataset, subset, is_ki, ki_convert_ratio=None):
         """Initialize any attributes that are common to all SimpleHybridPerfData subclasses

         Args:
             model_dataset (ModelDataset object): contains the dataset and related methods

-            transformers (list of transformer objects): contains the list of transformers used to transform the dataset
-
             subset (str): Label in ['train', 'valid', 'test', 'full'], indicating the type of subset
             of dataset for tracking predictions

@@ -1856,8 +1773,6 @@ def __init__(self, model_dataset, transformers, subset, is_ki, ki_convert_ratio=
                 ratio of the concentration to the Kd of the radioligand in a competitive binding
                 assay, or of the substrate concentration to the Michaelis constant (Km) in an
                 enzymatic inhibition assay.

-            transformed (bool): True if values to be passed to accumulate preds function are transformed values
-
         Raises:
             ValueError: if subset not in ['train','valid','test','full'], subset not supported

@@ -1871,8 +1786,6 @@ def __init__(self, model_dataset, transformers, subset, is_ki, ki_convert_ratio=

             pred_vals (dict): The dictionary of prediction results

-            transformers (list of Transformer objects): from input arguments
-
             real_vals (dict): The dictionary containing the original response column values

         """
@@ -1897,17 +1810,11 @@ def __init__(self, model_dataset, transformers, subset, is_ki, ki_convert_ratio=
         self.model_score = None
         self.is_ki = is_ki
         self.ki_convert_ratio = ki_convert_ratio
-        if transformed:
-            # Predictions passed to accumulate_preds() will be transformed
-            self.transformers = transformers
-            self.real_vals = dataset.y
-        else:
-            self.real_vals = transformers[0].untransform(dataset.y)
-            self.transformers = []
+        self.real_vals = model_dataset.get_untransformed_responses(dataset.ids)

     # ****************************************************************************************
     # class SimpleHybridPerfData
-    def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None):
+    def accumulate_preds(self, predicted_vals, ids, pred_stds=None):
         """Add training, validation or test set predictions to the data structure where we keep
         track of them.
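Editor's note (not part of the patch): ki_convert_ratio is the [L]/Kd (or [S]/Km) quotient that the Cheng-Prusoff equation uses to relate Ki and IC50 for the hybrid models. A hedged background sketch of that relation; the function names here are illustrative, not AMPL's API:

    import numpy as np

    def ki_to_ic50(ki, ratio):
        # Cheng-Prusoff: IC50 = Ki * (1 + [L]/Kd) for a competitive binding assay,
        # or Ki * (1 + [S]/Km) for an enzymatic inhibition assay
        return ki * (1.0 + ratio)

    def pki_to_pic50(pki, ratio):
        # the same relation in log space, with pKi = -log10(Ki)
        return pki - np.log10(1.0 + ratio)

    assert abs(ki_to_ic50(1.0, 9.0) - 10.0) < 1e-12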
@@ -1926,9 +1833,8 @@ def accumulate_preds(self, predicted_vals, ids, fold, pred_stds=None): self.pred_vals = self._reshape_preds(predicted_vals) if pred_stds is not None: self.pred_stds = self._reshape_preds(pred_stds) - # pred_vals = self.transformers[0].untransform(self.pred_vals, isreal=False) pred_vals = self.pred_vals - real_vals = self.get_real_values(ids) + real_vals = self.get_real_values(ids=ids) weights = self.get_weights(ids) scores = [] pos_ki = np.where(np.isnan(real_vals[:, 1]))[0] @@ -1979,8 +1885,8 @@ def _predict_binding(self, activity, conc): # **************************************************************************************** # class SimpleHybridPerfData - def get_pred_values(self, fold): - """Returns the predicted values accumulated over training, with any transformations undone. Returns + def get_pred_values(self): + """Returns the predicted values accumulated over training. Returns a tuple (ids, values, stds), where ids is the list of compound IDs, values is a (ncmpds, ntasks) array of predictions, and stds is always None for this class. @@ -2002,18 +1908,18 @@ def get_pred_values(self, fold): # **************************************************************************************** # class SimpleHybridPerfData - def get_real_values(self, fold, ids=None): - """Returns the real dataset response values, with any transformations undone, as an (ncmpds, ntasks) array + def get_real_values(self, ids=None): + """Returns the real dataset response values, as an (ncmpds, ntasks) array with compounds in the same ID order as in the return from get_pred_values(). Args: ids: Ignored for this class Returns: - np.array: Containing the real dataset response values with transformations undone. + np.array: Containing the real dataset response values. """ - return self.transformers[fold][0].untransform(self.real_vals) + return self.real_vals # **************************************************************************************** @@ -2160,7 +2066,7 @@ def should_stop(self): # **************************************************************************************** # class EpochManager - def update_epoch(self, ei, fold, train_dset=None, valid_dset=None, test_dset=None): + def update_epoch(self, ei, train_dset=None, valid_dset=None, test_dset=None): """Update training state after an epoch This function updates train/valid/test_perf_data. Call this function once @@ -2186,15 +2092,15 @@ def update_epoch(self, ei, fold, train_dset=None, valid_dset=None, test_dset=Non This function updates self._should_stop """ - train_perf = self.update(ei, 'train', train_dset, fold) - valid_perf = self.update(ei, 'valid', valid_dset, fold) - test_perf = self.update(ei, 'test', test_dset, fold) + train_perf = self.update(ei, 'train', train_dset) + valid_perf = self.update(ei, 'valid', valid_dset) + test_perf = self.update(ei, 'test', test_dset) return [p for p in [train_perf, valid_perf, test_perf] if p is not None] # **************************************************************************************** # class EpochManager - def accumulate(self, ei, subset, dset, fold): + def accumulate(self, ei, subset, dset): """Accumulate predictions Makes predictions, accumulate predictions and calculate the performance metric. Calls PerfData.accumulate_preds @@ -2211,7 +2117,7 @@ def accumulate(self, ei, subset, dset, fold): float: Performance metric for the given dset. 
""" pred = self._make_pred(dset) - perf = getattr(self.wrapper, f'{subset}_perf_data')[ei].accumulate_preds(pred, dset.ids, fold) + perf = getattr(self.wrapper, f'{subset}_perf_data')[ei].accumulate_preds(pred, dset.ids) return perf # **************************************************************************************** @@ -2270,7 +2176,7 @@ def update_valid(self, ei): # **************************************************************************************** # class EpochManager - def update(self, ei, subset, fold, dset=None): + def update(self, ei, subset, dset=None): """Update training state Updates the training state for a given subset and epoch index with the given dataset. @@ -2289,7 +2195,7 @@ def update(self, ei, subset, fold, dset=None): if dset is None: return None - perf = self.accumulate(ei, subset, dset, fold) + perf = self.accumulate(ei, subset, dset) self.compute(ei, subset) if subset == 'valid': diff --git a/atomsci/ddm/pipeline/transformations.py b/atomsci/ddm/pipeline/transformations.py index a9143f1c..2e7c7420 100644 --- a/atomsci/ddm/pipeline/transformations.py +++ b/atomsci/ddm/pipeline/transformations.py @@ -148,7 +148,7 @@ def get_transformer_keys(params): using all validation and training data at the end of the training loop. """ if params.split_strategy != 'k_fold_cv': - return ['final'] + return [0, 'final'] else: return list(range(params.num_folds))+['final'] @@ -157,7 +157,7 @@ def get_blank_transformations(): """Get empty transformations dictionary These keys must always exist, even when there are no transformations """ - return {'final':[]} + return {0:[], 'final':[]} # **************************************************************************************** def get_all_training_datasets(model_dataset): @@ -171,10 +171,14 @@ def get_all_training_datasets(model_dataset): # this dataset is not split into training and validation, use all data result['final'] = model_dataset.dataset elif len(model_dataset.train_valid_dsets)==1: - result['final'] = model_dataset.train_valid_dsets[0] + # there is only one fold, use the training set from that + # for random forests and xgboost models, the final and + # 0th fold are the same if there k-fold is not used + result['final'] = model_dataset.train_valid_dsets[0][0] + result[0] = model_dataset.train_valid_dsets[0][0] else: # First, get the training set from all the folds - for i, t in enumerate(model_dataset.train_valid_dsets): + for i, (t, v) in enumerate(model_dataset.train_valid_dsets): result[i] = t # Next, add the dataset that contains all training+validation data diff --git a/atomsci/ddm/test/unit/test_model_wrapper.py b/atomsci/ddm/test/unit/test_model_wrapper.py index 0b270987..fc43206f 100644 --- a/atomsci/ddm/test/unit/test_model_wrapper.py +++ b/atomsci/ddm/test/unit/test_model_wrapper.py @@ -185,7 +185,7 @@ def test_super_transform_dataset(): mdl = model_wrapper.create_model_wrapper(inp_params, data_obj_ecfp.featurization) mdl.setup_model_dirs() mdl.create_transformers(trans.get_all_training_datasets(data_obj_ecfp)) - dataset = mdl.transform_dataset(data_obj_ecfp.dataset) + dataset = mdl.transform_dataset(data_obj_ecfp.dataset, fold='final') test = [] # checking that the dataset is the correct type From 4689f9e6e9cbc5b4ee4e6d6562ba71fb01c34b0d Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Wed, 11 Dec 2024 09:31:22 -0800 Subject: [PATCH 06/30] Switched back to saving one .pkl for all transformers. The pkl is still a tuple, but the elements are now dictionaries instead of lists. 
The rest of the code should work the same.
---
 atomsci/ddm/pipeline/model_wrapper.py | 85 ++++++++++++++-------------
 1 file changed, 43 insertions(+), 42 deletions(-)

diff --git a/atomsci/ddm/pipeline/model_wrapper.py b/atomsci/ddm/pipeline/model_wrapper.py
index 521db40c..06dd7446 100644
--- a/atomsci/ddm/pipeline/model_wrapper.py
+++ b/atomsci/ddm/pipeline/model_wrapper.py
@@ -373,6 +373,7 @@ def create_transformers(self, training_datasets):
             params.transformer_key: A string pointing to the dataset key containing the transformer in the datastore, or the path to the transformer

         """
+        total_transformers = 0
         for k, td in training_datasets.items():
             self.transformers[k] = self._create_output_transformers(td)
@@ -381,15 +382,16 @@
             # Set up transformers for weights, if needed
             self.transformers_w[k] = trans.create_weight_transformers(self.params, td)

-            if len(self.transformers[k]) + len(self.transformers_x[k]) + len(self.transformers_w[k]) > 0:
+            total_transformers += len(self.transformers[k]) + len(self.transformers_x[k]) + len(self.transformers_w[k])

-                # Transformers are no longer saved as separate datastore objects; they are included in the model tarball
-                self.params.transformer_key = os.path.join(self.output_dir, f'transformers_{k}.pkl')
-                with open(self.params.transformer_key, 'wb') as txfmrpkl:
-                    pickle.dump((self.transformers[k], self.transformers_x[k], self.transformers_w[k]), txfmrpkl)
-                self.log.info("Wrote transformers to %s" % self.params.transformer_key)
-                self.params.transformer_oid = ""
-                self.params.transformer_bucket = ""
+        if total_transformers > 0:
+            # Transformers are no longer saved as separate datastore objects; they are included in the model tarball
+            self.params.transformer_key = os.path.join(self.output_dir, 'transformers.pkl')
+            with open(self.params.transformer_key, 'wb') as txfmrpkl:
+                pickle.dump((self.transformers, self.transformers_x, self.transformers_w), txfmrpkl)
+            self.log.info("Wrote transformers to %s" % self.params.transformer_key)
+            self.params.transformer_oid = ""
+            self.params.transformer_bucket = ""

 # ****************************************************************************************

@@ -403,42 +405,39 @@ def reload_transformers(self):
         if not trans.transformers_needed(self.params):
             return

-        for i in trans.get_transformer_keys(self.params):
-            # for backwards compatibility, if this file exists, all folds use the same transformers
-            local_path = f"{self.output_dir}/transformers.pkl"
-            if not os.path.exists(local_path):
-                local_path = f"{self.output_dir}/transformers_{i}.pkl"
-
-            if os.path.exists(local_path):
-                self.log.info(f"Reloading transformers from model tarball {local_path}")
-                with open(local_path, 'rb') as txfmr:
-                    transformers_tuple = pickle.load(txfmr)
-            else:
-                if self.params.transformer_key is not None:
-                    if self.params.save_results:
-                        self.log.info(f"Reloading transformers from datastore key {self.params.transformer_key}")
-                        transformers_tuple = dsf.retrieve_dataset_by_datasetkey(
-                            dataset_key = self.params.transformer_key,
-                            bucket = self.params.transformer_bucket,
-                            client = self.ds_client )
-                    else:
-                        self.log.info(f"Reloading transformers from file {self.params.transformer_key}")
-                        with open(self.params.transformer_key, 'rb') as txfmr:
-                            transformers_tuple = pickle.load(txfmr)
+        # for backwards compatibility, if this file exists, all folds use the same transformers
+        local_path = f"{self.output_dir}/transformers.pkl"
+
+        if os.path.exists(local_path):
+            self.log.info(f"Reloading transformers
from model tarball {local_path}") + with open(local_path, 'rb') as txfmr: + transformers_tuple = pickle.load(txfmr) + else: + if self.params.transformer_key is not None: + if self.params.save_results: + self.log.info(f"Reloading transformers from datastore key {self.params.transformer_key}") + transformers_tuple = dsf.retrieve_dataset_by_datasetkey( + dataset_key = self.params.transformer_key, + bucket = self.params.transformer_bucket, + client = self.ds_client ) else: - # Shouldn't happen - raise Exception("Transformers needed to reload model, but no transformer_key specified.") + self.log.info(f"Reloading transformers from file {self.params.transformer_key}") + with open(self.params.transformer_key, 'rb') as txfmr: + transformers_tuple = pickle.load(txfmr) + else: + # Shouldn't happen + raise Exception("Transformers needed to reload model, but no transformer_key specified.") - if len(transformers_tuple) == 3: - ty, tx, tw = transformers_tuple - else: - ty, tx = transformers_tuple - tw = [] + if len(transformers_tuple) == 3: + ty, tx, tw = transformers_tuple + else: + ty, tx = transformers_tuple + tw = [] - self.transformers[i] = ty - self.transformers_x[i] = tx - self.transformers_w[i] = tw + self.transformers = ty + self.transformers_x = tx + self.transformers_w = tw # **************************************************************************************** @@ -1072,7 +1071,7 @@ def generate_predictions(self, dataset): """ pred, std = None, None self.log.info("Predicting values for current model") - + dataset = self.transform_dataset(dataset, 'final') # For deepchem's predict_uncertainty function, you are not allowed to specify transformers. That means that the # predictions are being made in the transformed space, not the original space. We call undo_transforms() to generate # the transformed predictions. 
To transform the standard deviations, we rely on the fact that at present we only use @@ -1907,6 +1906,7 @@ def generate_predictions(self, dataset): pred, std = None, None self.log.info("Evaluating current model") + dataset = self.transform_dataset(dataset, 'final') pred = self.model.predict(dataset, self.transformers['final']) ncmpds = pred.shape[0] pred = pred.reshape((ncmpds,1,-1)) @@ -2272,7 +2272,8 @@ def generate_predictions(self, dataset): pred, std = None, None self.log.warning("Evaluating current model") - pred = self.model.predict(dataset, self.transformers) + dataset = self.transform_dataset(dataset, 'final') + pred = self.model.predict(dataset, self.transformers['final']) ncmpds = pred.shape[0] pred = pred.reshape((ncmpds, 1, -1)) self.log.warning("uncertainty not supported by xgboost models") From cd14bd9c83ac3a5b1ecee0a71f3002f58493c338 Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Wed, 11 Dec 2024 09:57:16 -0800 Subject: [PATCH 07/30] Fixed issue where _create_*_transformers sometimes would not return a value --- atomsci/ddm/pipeline/model_wrapper.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/atomsci/ddm/pipeline/model_wrapper.py b/atomsci/ddm/pipeline/model_wrapper.py index 06dd7446..d3c59b22 100644 --- a/atomsci/ddm/pipeline/model_wrapper.py +++ b/atomsci/ddm/pipeline/model_wrapper.py @@ -338,6 +338,8 @@ def _create_output_transformers(self, dataset): # TODO: Just a warning, we may have response transformers for classification datasets in the future if self.params.prediction_type=='regression' and self.params.transformers is True: return [trans.NormalizationTransformerMissingData(transform_y=True, dataset=dataset)] + else: + return [] # **************************************************************************************** @@ -1612,7 +1614,9 @@ def _create_output_transformers(self, dataset): """ # TODO: Just a warning, we may have response transformers for classification datasets in the future if self.params.prediction_type=='regression' and self.params.transformers is True: - self.transformers = [trans.NormalizationTransformerHybrid(transform_y=True, dataset=dataset)] + return [trans.NormalizationTransformerHybrid(transform_y=True, dataset=dataset)] + else: + return [] # **************************************************************************************** class ForestModelWrapper(ModelWrapper): From 00a74af42f46358b62715b1b4ffab8a9be97d6c6 Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Wed, 11 Dec 2024 10:36:44 -0800 Subject: [PATCH 08/30] Missing model_dataset argument --- atomsci/ddm/pipeline/model_wrapper.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/atomsci/ddm/pipeline/model_wrapper.py b/atomsci/ddm/pipeline/model_wrapper.py index d3c59b22..f9bddcca 100644 --- a/atomsci/ddm/pipeline/model_wrapper.py +++ b/atomsci/ddm/pipeline/model_wrapper.py @@ -905,8 +905,8 @@ def train_kfold_cv(self, pipeline): for ei in LCTimerKFoldIterator(self.params, pipeline, self.log): # Create PerfData structures that are only used within loop to compute metrics during initial training - train_perf_data = perf.create_perf_data(self.params.prediction_type, 'train') - test_perf_data = perf.create_perf_data(self.params.prediction_type, 'test') + train_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, 'train') + test_perf_data = perf.create_perf_data(self.params.prediction_type, pipeline.data, 'test') for k in range(num_folds): self.model = models[k] train_dset, valid_dset = 
pipeline.data.train_valid_dsets[k] From 9cb3f4dac35fb99adc3ace7e62c414f53a1b40fd Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Wed, 11 Dec 2024 11:31:42 -0800 Subject: [PATCH 09/30] Removed double nested list --- atomsci/ddm/pipeline/model_wrapper.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/atomsci/ddm/pipeline/model_wrapper.py b/atomsci/ddm/pipeline/model_wrapper.py index f9bddcca..dd08cc2f 100644 --- a/atomsci/ddm/pipeline/model_wrapper.py +++ b/atomsci/ddm/pipeline/model_wrapper.py @@ -916,8 +916,8 @@ def train_kfold_cv(self, pipeline): # We turn off automatic checkpointing - we only want to save a checkpoints for the final model. self.model.fit(train_dset, nb_epoch=1, checkpoint_interval=0, restore=False) - train_pred = self.model.predict(train_dset, [self.transformers[k]]) - test_pred = self.model.predict(test_dset, [self.transformers[k]]) + train_pred = self.model.predict(train_dset, self.transformers[k]) + test_pred = self.model.predict(test_dset, self.transformers[k]) train_perf = train_perf_data.accumulate_preds(train_pred, train_dset.ids) test_perf = test_perf_data.accumulate_preds(test_pred, test_dset.ids) From 72172a258b804ec137a3847991f08ceccc108235 Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Wed, 11 Dec 2024 11:55:57 -0800 Subject: [PATCH 10/30] Updated transformer path in test --- atomsci/ddm/test/unit/test_model_wrapper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/atomsci/ddm/test/unit/test_model_wrapper.py b/atomsci/ddm/test/unit/test_model_wrapper.py index fc43206f..ac4f746b 100644 --- a/atomsci/ddm/test/unit/test_model_wrapper.py +++ b/atomsci/ddm/test/unit/test_model_wrapper.py @@ -142,7 +142,7 @@ def test_super_create_transformers(): test.append(isinstance(mdl.transformers['final'][0], dc.trans.transformers.NormalizationTransformer)) test.append(mdl.transformers_x['final'] == []) #testing saving of transformer to correct location: - transformer_path = os.path.join(mdl.output_dir, 'transformers_final.pkl') + transformer_path = os.path.join(mdl.output_dir, 'transformers.pkl') test.append(os.path.isfile(transformer_path)) From 9863aef3c738d68d8ca454d22e8f772c54228b86 Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Wed, 11 Dec 2024 14:00:22 -0800 Subject: [PATCH 11/30] Removed fold argument and added backwards transformer functionality --- atomsci/ddm/pipeline/model_wrapper.py | 16 +++++++- atomsci/ddm/test/unit/test_transformers.py | 46 ++++++++++++++++++++++ 2 files changed, 60 insertions(+), 2 deletions(-) create mode 100644 atomsci/ddm/test/unit/test_transformers.py diff --git a/atomsci/ddm/pipeline/model_wrapper.py b/atomsci/ddm/pipeline/model_wrapper.py index dd08cc2f..b45eb2b2 100644 --- a/atomsci/ddm/pipeline/model_wrapper.py +++ b/atomsci/ddm/pipeline/model_wrapper.py @@ -434,9 +434,21 @@ def reload_transformers(self): if len(transformers_tuple) == 3: ty, tx, tw = transformers_tuple else: + # this must be very old, we no longer save just 2 transformers ty, tx = transformers_tuple + # ensure that this is an old model where transformers are still lists + assert isinstance(ty, list) tw = [] + # this is for backwards compatibility, if ty, tx, tw are lists, convert them to dictionaries. + if isinstance(ty, list): + # this is an old model. Only one set of transformers + for k in trans.get_transformer_keys(self.params): + self.transformers[k] = ty + self.transformers_x[k] = tx + self.transformers_w[k] = tw + else: + # these are new transformers. 
They are dictionaries self.transformers = ty self.transformers_x = tx self.transformers_w = tw @@ -926,7 +938,7 @@ def train_kfold_cv(self, pipeline): def make_pred(x): return self.model.predict(x, self.transformers[k]) em.set_make_pred(make_pred) - valid_perf = em.accumulate(ei, subset='valid', dset=valid_dset, transforms=self.transformers[k]) + valid_perf = em.accumulate(ei, subset='valid', dset=valid_dset) self.log.info("Fold %d, epoch %d: training %s = %.3f, validation %s = %.3f, test %s = %.3f" % ( k, ei, pipeline.metric_type, train_perf, pipeline.metric_type, valid_perf, pipeline.metric_type, test_perf)) @@ -954,7 +966,7 @@ def make_pred(x): for ei in range(self.best_epoch+1): self.model.fit(fit_dataset, nb_epoch=1, checkpoint_interval=0, restore=False) - train_perf, test_perf = em.update_epoch(ei, train_dset=fit_dataset, test_dset=test_dset, fold='train_valid') + train_perf, test_perf = em.update_epoch(ei, train_dset=fit_dataset, test_dset=test_dset) self.log.info(f"Combined folds: Epoch {ei}, training {pipeline.metric_type} = {train_perf:.3}," + f"test {pipeline.metric_type} = {test_perf:.3}") diff --git a/atomsci/ddm/test/unit/test_transformers.py b/atomsci/ddm/test/unit/test_transformers.py new file mode 100644 index 00000000..f5599bdf --- /dev/null +++ b/atomsci/ddm/test/unit/test_transformers.py @@ -0,0 +1,46 @@ +import atomsci.ddm.pipeline.transformations as trans +import numpy as np +from deepchem.data import NumpyDataset + + +def test_no_missing_values(): + y = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]) + w = np.array([[1, 1], [1, 1], [1, 1]]) + x = np.ones_like(y) + ids = np.array(range(len(y))) + dataset = NumpyDataset(X=x, y=y, w=w, ids=ids) + y_means, y_stds = trans.get_statistics_missing_ydata(dataset) + np.testing.assert_array_almost_equal(y_means, [3.0, 4.0]) + np.testing.assert_array_almost_equal(y_stds, [1.632993, 1.632993]) + +def test_some_missing_values(): + y = np.array([[1.0, np.nan], [3.0, 4.0], [5.0, 6.0]]) + w = np.array([[1, 0], [1, 1], [1, 1]]) + x = np.ones_like(y) + ids = np.array(range(len(y))) + dataset = NumpyDataset(X=x, y=y, w=w, ids=ids) + y_means, y_stds = trans.get_statistics_missing_ydata(dataset) + np.testing.assert_array_almost_equal(y_means, [3.0, 5.0]) + np.testing.assert_array_almost_equal(y_stds, [1.632993, 1.0]) + +def test_all_missing_values(): + y = np.array([[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]]) + w = np.array([[0, 0], [0, 0], [0, 0]]) + x = np.ones_like(y) + ids = np.array(range(len(y))) + dataset = NumpyDataset(X=x, y=y, w=w, ids=ids) + y_means, y_stds = trans.get_statistics_missing_ydata(dataset) + np.testing.assert_array_almost_equal(y_means, [0.0, 0.0]) + np.testing.assert_array_almost_equal(y_stds, [0.0, 0.0]) + +def test_one_task_no_missing_values(): + y = np.array([[1.0], [3.0], [5.0]]) + w = np.array([[1, 1], [1, 1], [1, 1]]) + x = np.ones_like(y) + ids = np.array(range(len(y))) + dataset = NumpyDataset(X=x, y=y, w=w, ids=ids) + y_means, y_stds = trans.get_statistics_missing_ydata(dataset) + np.testing.assert_array_almost_equal(y_means, [3.0]) + np.testing.assert_array_almost_equal(y_stds, [1.632993]) + + From a40dffaade71cecb9a7b770843deaaa6582587fc Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Wed, 11 Dec 2024 14:52:58 -0800 Subject: [PATCH 12/30] Removed a few 'final' arguments that are no longer used --- atomsci/ddm/pipeline/perf_plots.py | 4 ++-- atomsci/ddm/test/integrative/hybrid/test_hybrid.py | 2 +- atomsci/ddm/utils/hyperparam_search_wrapper.py | 2 +- 3 files changed, 4 insertions(+), 4 
deletions(-) diff --git a/atomsci/ddm/pipeline/perf_plots.py b/atomsci/ddm/pipeline/perf_plots.py index 2f705c12..6dcf592d 100644 --- a/atomsci/ddm/pipeline/perf_plots.py +++ b/atomsci/ddm/pipeline/perf_plots.py @@ -138,7 +138,7 @@ def plot_pred_vs_actual(model, epoch_label='best', threshold=None, error_bars=Fa subs_pred_df=pred_df[pred_df.subset==subset] perf_data = wrapper.get_perf_data(subset, epoch_label) - pred_results = perf_data.get_prediction_results('final') + pred_results = perf_data.get_prediction_results() # pred_df=pfm.predict_from_pipe(model) std=len([x for x in pred_df.columns if 'std' in x]) > 0 if perf_data.num_tasks > 1: @@ -164,7 +164,7 @@ def plot_pred_vs_actual(model, epoch_label='best', threshold=None, error_bars=Fa # % binding / inhibition data, with one row per subset. for s, subset in enumerate(subsets): perf_data = wrapper.get_perf_data(subset, epoch_label) - pred_results = perf_data.get_prediction_results('final') + pred_results = perf_data.get_prediction_results() y_actual = perf_data.get_real_values('final') ids, y_pred, y_std = perf_data.get_pred_values('final') r2 = pred_results['r2_score'] diff --git a/atomsci/ddm/test/integrative/hybrid/test_hybrid.py b/atomsci/ddm/test/integrative/hybrid/test_hybrid.py index d05255a3..cc504103 100644 --- a/atomsci/ddm/test/integrative/hybrid/test_hybrid.py +++ b/atomsci/ddm/test/integrative/hybrid/test_hybrid.py @@ -52,7 +52,7 @@ def test(): print("Check the model performance on validation data") pred_data = pl.model_wrapper.get_perf_data(subset="valid", epoch_label="best") - pred_results = pred_data.get_prediction_results('final') + pred_results = pred_data.get_prediction_results() print(pred_results) pred_score = pred_results['r2_score'] diff --git a/atomsci/ddm/utils/hyperparam_search_wrapper.py b/atomsci/ddm/utils/hyperparam_search_wrapper.py index 9c5e6861..264ec6d9 100755 --- a/atomsci/ddm/utils/hyperparam_search_wrapper.py +++ b/atomsci/ddm/utils/hyperparam_search_wrapper.py @@ -1564,7 +1564,7 @@ def lossfn(p): for subset in subsets: if not model_failed: perf_data = pl.model_wrapper.get_perf_data(subset=subset, epoch_label="best") - sub_pred_results = perf_data.get_prediction_results('final') + sub_pred_results = perf_data.get_prediction_results() else: if tparam.prediction_type == "regression": sub_pred_results = {"r2_score": 0, "rms_score": 100} From 6332c60a8b4d538ff36bd2cd6683a05ca155793c Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Wed, 11 Dec 2024 15:00:08 -0800 Subject: [PATCH 13/30] Removed unused imports --- atomsci/ddm/pipeline/model_datasets.py | 1 - atomsci/ddm/pipeline/model_wrapper.py | 1 - 2 files changed, 2 deletions(-) diff --git a/atomsci/ddm/pipeline/model_datasets.py b/atomsci/ddm/pipeline/model_datasets.py index 36f6be3b..b01120ff 100644 --- a/atomsci/ddm/pipeline/model_datasets.py +++ b/atomsci/ddm/pipeline/model_datasets.py @@ -6,7 +6,6 @@ from deepchem.data import NumpyDataset import numpy as np import pandas as pd -import deepchem as dc import uuid from atomsci.ddm.pipeline import featurization as feat from atomsci.ddm.pipeline import splitting as split diff --git a/atomsci/ddm/pipeline/model_wrapper.py b/atomsci/ddm/pipeline/model_wrapper.py index b45eb2b2..f637940b 100644 --- a/atomsci/ddm/pipeline/model_wrapper.py +++ b/atomsci/ddm/pipeline/model_wrapper.py @@ -8,7 +8,6 @@ import joblib import deepchem as dc -import deepchem.trans as dctrans import numpy as np import tensorflow as tf if dc.__version__.startswith('2.1'): From 398cf0614fa0f56f077d19cc050f97339f81f95b Mon Sep 17 
00:00:00 2001 From: "he6@llnl.gov" Date: Wed, 11 Dec 2024 15:50:57 -0800 Subject: [PATCH 14/30] specified fold for embedding features --- atomsci/ddm/pipeline/featurization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/atomsci/ddm/pipeline/featurization.py b/atomsci/ddm/pipeline/featurization.py index b3e96be6..294b432d 100644 --- a/atomsci/ddm/pipeline/featurization.py +++ b/atomsci/ddm/pipeline/featurization.py @@ -1021,7 +1021,7 @@ def featurize_data(self, dset_df, params, contains_responses): weights = input_dataset.w attr = input_model_dataset.attr - input_dataset = self.embedding_pipeline.model_wrapper.transform_dataset(input_dataset) + input_dataset = self.embedding_pipeline.model_wrapper.transform_dataset(input_dataset, fold='final') # Run the embedding model to generate features. embedding = self.embedding_pipeline.model_wrapper.generate_embeddings(input_dataset) From 218f70d337fdea087e8a5edc0370722bd410eaaa Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Thu, 12 Dec 2024 08:28:27 -0800 Subject: [PATCH 15/30] More tests for perf_data --- atomsci/ddm/pipeline/perf_data.py | 2 +- ...perf_data_KFoldClassificationPerfData.json | 17 ++ ...fig_perf_data_KFoldRegressoinPerfData.json | 18 ++ ...erf_data_KFoldRegressoinPerfDataMulti.json | 17 ++ ...erf_data_SimpleClassificationPerfData.json | 17 ++ ...ig_perf_data_SimpleRegressionPerfData.json | 17 ++ atomsci/ddm/test/unit/test_perf_data.py | 267 ++++++++++++++++++ 7 files changed, 354 insertions(+), 1 deletion(-) create mode 100644 atomsci/ddm/test/unit/config_perf_data_KFoldClassificationPerfData.json create mode 100644 atomsci/ddm/test/unit/config_perf_data_KFoldRegressoinPerfData.json create mode 100644 atomsci/ddm/test/unit/config_perf_data_KFoldRegressoinPerfDataMulti.json create mode 100644 atomsci/ddm/test/unit/config_perf_data_SimpleClassificationPerfData.json create mode 100644 atomsci/ddm/test/unit/config_perf_data_SimpleRegressionPerfData.json create mode 100644 atomsci/ddm/test/unit/test_perf_data.py diff --git a/atomsci/ddm/pipeline/perf_data.py b/atomsci/ddm/pipeline/perf_data.py index 6d0590da..727958af 100644 --- a/atomsci/ddm/pipeline/perf_data.py +++ b/atomsci/ddm/pipeline/perf_data.py @@ -1638,7 +1638,7 @@ def accumulate_preds(self, predicted_vals, ids, pred_stds=None): if self.num_classes > 2: # If more than 2 classes, real_vals is indicator matrix (one-hot encoded). 
task_real_vals = np.squeeze(real_vals[nzrows,i,:]) - task_class_probs =np.squeeze(class_probs[nzrows,i,:]) + task_class_probs = np.squeeze(class_probs[nzrows,i,:]) scores.append(roc_auc_score(task_real_vals, task_class_probs, average='macro')) else: diff --git a/atomsci/ddm/test/unit/config_perf_data_KFoldClassificationPerfData.json b/atomsci/ddm/test/unit/config_perf_data_KFoldClassificationPerfData.json new file mode 100644 index 00000000..5b0a19b1 --- /dev/null +++ b/atomsci/ddm/test/unit/config_perf_data_KFoldClassificationPerfData.json @@ -0,0 +1,17 @@ +{"verbose": "True", + "datastore": "False", + "save_results": "False", + "model_type": "NN", + "featurizer": "ecfp", + "split_strategy": "k_fold_cv", + "splitter": "random", + "split_test_frac": "0.15", + "split_valid_frac": "0.15", + "transformers": "True", + "dataset_key": "replaced", + "id_col": "compound_id", + "response_cols":"active", + "smiles_col": "base_rdkit_smiles", + "max_epochs":"2", + "prediction_type": "classification", + "result_dir": "replaced"} \ No newline at end of file diff --git a/atomsci/ddm/test/unit/config_perf_data_KFoldRegressoinPerfData.json b/atomsci/ddm/test/unit/config_perf_data_KFoldRegressoinPerfData.json new file mode 100644 index 00000000..b2ea5bd8 --- /dev/null +++ b/atomsci/ddm/test/unit/config_perf_data_KFoldRegressoinPerfData.json @@ -0,0 +1,18 @@ +{"verbose": "True", + "datastore": "False", + "save_results": "False", + "model_type": "NN", + "featurizer": "ecfp", + "split_strategy": "k_fold_cv", + "splitter": "random", + "split_test_frac": "0.15", + "split_valid_frac": "0.15", + "transformers": "True", + "dataset_key": "replaced", + "id_col": "compound_id", + "response_cols":"pIC50", + "smiles_col": "base_rdkit_smiles", + "max_epochs":"2", + "prediction_type": "regression", + "result_dir": "replaced" +} \ No newline at end of file diff --git a/atomsci/ddm/test/unit/config_perf_data_KFoldRegressoinPerfDataMulti.json b/atomsci/ddm/test/unit/config_perf_data_KFoldRegressoinPerfDataMulti.json new file mode 100644 index 00000000..9b3512a6 --- /dev/null +++ b/atomsci/ddm/test/unit/config_perf_data_KFoldRegressoinPerfDataMulti.json @@ -0,0 +1,17 @@ +{"verbose": "True", + "datastore": "False", + "save_results": "False", + "model_type": "NN", + "featurizer": "ecfp", + "split_strategy": "k_fold_cv", + "splitter": "random", + "split_test_frac": "0.15", + "split_valid_frac": "0.15", + "transformers": "True", + "dataset_key": "replaced", + "id_col": "compound_id", + "response_cols":["pIC50", "pIC50_dupe"], + "smiles_col": "base_rdkit_smiles", + "max_epochs":"2", + "prediction_type": "regression", + "result_dir":"replaced"} diff --git a/atomsci/ddm/test/unit/config_perf_data_SimpleClassificationPerfData.json b/atomsci/ddm/test/unit/config_perf_data_SimpleClassificationPerfData.json new file mode 100644 index 00000000..1017c83f --- /dev/null +++ b/atomsci/ddm/test/unit/config_perf_data_SimpleClassificationPerfData.json @@ -0,0 +1,17 @@ +{"verbose": "True", + "datastore": "False", + "save_results": "False", + "model_type": "NN", + "featurizer": "ecfp", + "split_strategy": "train_valid_test", + "splitter": "random", + "split_test_frac": "0.15", + "split_valid_frac": "0.15", + "transformers": "True", + "dataset_key": "replaced", + "id_col": "compound_id", + "response_cols":"active", + "smiles_col": "base_rdkit_smiles", + "max_epochs":"2", + "prediction_type": "classification", + "result_dir": "replaced"} \ No newline at end of file diff --git a/atomsci/ddm/test/unit/config_perf_data_SimpleRegressionPerfData.json 
b/atomsci/ddm/test/unit/config_perf_data_SimpleRegressionPerfData.json new file mode 100644 index 00000000..f4361844 --- /dev/null +++ b/atomsci/ddm/test/unit/config_perf_data_SimpleRegressionPerfData.json @@ -0,0 +1,17 @@ +{"verbose": "True", + "datastore": "False", + "save_results": "False", + "model_type": "NN", + "featurizer": "ecfp", + "split_strategy": "train_valid_test", + "splitter": "random", + "split_test_frac": "0.15", + "split_valid_frac": "0.15", + "transformers": "True", + "id_col": "compound_id", + "dataset_key": "replaced", + "response_cols":"pIC50", + "smiles_col": "base_rdkit_smiles", + "max_epochs":"2", + "prediction_type": "regression", + "result_dir": "replaced"} \ No newline at end of file diff --git a/atomsci/ddm/test/unit/test_perf_data.py b/atomsci/ddm/test/unit/test_perf_data.py new file mode 100644 index 00000000..20275fe2 --- /dev/null +++ b/atomsci/ddm/test/unit/test_perf_data.py @@ -0,0 +1,267 @@ +import atomsci.ddm.pipeline.perf_data as perf_data +import atomsci.ddm.pipeline.model_pipeline as model_pipeline +import atomsci.ddm.pipeline.parameter_parser as parse +import os +import tempfile +import deepchem as dc +import numpy as np +import shutil +import pandas as pd +import json + +def copy_to_temp(dskey, res_dir): + new_dskey = shutil.copy(dskey, res_dir) + return new_dskey + +def setup_paths(): + script_path = os.path.dirname(os.path.realpath(__file__)) + res_dir = tempfile.mkdtemp() + dskey = os.path.join(script_path, '../test_datasets/aurka_chembl_base_smiles_union.csv') + tmp_dskey = copy_to_temp(dskey, res_dir) + + return res_dir, tmp_dskey + +def read_params(json_file, res_dir, tmp_dskey): + with open(json_file, 'r') as file: + params = json.load(file) + params['result_dir'] = res_dir + params['dataset_key'] = tmp_dskey + return params + +def make_relative_to_file(relative_path): + script_path = os.path.dirname(os.path.realpath(__file__)) + result = os.path.join(script_path, relative_path) + + return result + +def test_KFoldRegressionPerfData(): + res_dir, tmp_dskey = setup_paths() + + params = read_params(make_relative_to_file('config_perf_data_KFoldRegressoinPerfData.json'), + res_dir, tmp_dskey) + + # setup a pipeline that will be used to create performance data + pparams = parse.wrapper(params) + mp = model_pipeline.ModelPipeline(pparams) + mp.train_model() + + # creat performance data + perf = perf_data.create_perf_data(mp.params.prediction_type, + mp.data, 'train') + + assert isinstance(perf, perf_data.KFoldRegressionPerfData) + + ids = sorted(list(mp.data.combined_training_data().ids)) + weights = perf.get_weights(ids) + assert weights.shape == (len(ids),1) + assert all(weights==1) + + real_vals = perf.get_real_values(ids) + d = dc.data.NumpyDataset(X=np.ones_like(real_vals), y=real_vals, ids=ids, w=np.ones(len(ids))) + + pred_vals = d.y + # This should have r2 of 1 + r2 = perf.accumulate_preds(pred_vals, ids) + assert r2 == 1 + # do a few more folds + r2 = perf.accumulate_preds(pred_vals, ids) + r2 = perf.accumulate_preds(pred_vals, ids) + + (res_ids, res_vals, res_std) = perf.get_pred_values() + (r2_mean, r2_std) = perf.compute_perf_metrics() + + assert np.allclose(res_vals, real_vals) + assert np.allclose(res_std, np.zeros_like(res_std)) + + # perfect score every time + assert r2_mean==1 + assert r2_std==0 + +def test_KFoldRegressionPerfDataMulti(): + res_dir, tmp_dskey = setup_paths() + + # duplicate pIC50 column + df = pd.read_csv(tmp_dskey) + df['pIC50_dupe'] = df['pIC50'] + df.to_csv(tmp_dskey, index=False) + + params = 
read_params(make_relative_to_file('config_perf_data_KFoldRegressoinPerfDataMulti.json'), + res_dir, tmp_dskey) + + # setup a pipeline that will be used to create performance data + pparams = parse.wrapper(params) + mp = model_pipeline.ModelPipeline(pparams) + mp.train_model() + + # creat performance data + perf = perf_data.create_perf_data(mp.params.prediction_type, + mp.data, 'train') + + assert isinstance(perf, perf_data.KFoldRegressionPerfData) + + ids = sorted(list(mp.data.combined_training_data().ids)) + weights = perf.get_weights(ids) + assert weights.shape == (len(ids),2) + assert np.allclose(weights, np.ones_like(weights)) + + real_vals = perf.get_real_values(ids) + d = dc.data.NumpyDataset(X=np.ones_like(real_vals), y=real_vals, ids=ids, w=np.ones_like(weights)) + + pred_vals = d.y + # This should have r2 of 1 + r2 = perf.accumulate_preds(pred_vals, ids) + assert r2 == 1 + # do a few more folds + r2 = perf.accumulate_preds(pred_vals, ids) + r2 = perf.accumulate_preds(pred_vals, ids) + + (res_ids, res_vals, res_std) = perf.get_pred_values() + (r2_mean, r2_std) = perf.compute_perf_metrics() + + assert np.allclose(res_vals, real_vals) + assert np.allclose(res_std, np.zeros_like(res_std)) + + # perfect score every time + assert r2_mean==1 + assert r2_std==0 + +def test_KFoldClassificationPerfData(): + res_dir, tmp_dskey = setup_paths() + + params = read_params( + make_relative_to_file('config_perf_data_KFoldClassificationPerfData.json'), + res_dir, tmp_dskey) + + # setup a pipeline that will be used to create performance data + pparams = parse.wrapper(params) + mp = model_pipeline.ModelPipeline(pparams) + mp.train_model() + + # creat performance data + perf = perf_data.create_perf_data(mp.params.prediction_type, + mp.data, 'train') + + assert isinstance(perf, perf_data.KFoldClassificationPerfData) + + ids = sorted(list(mp.data.combined_training_data().ids)) + weights = perf.get_weights(ids) + assert weights.shape == (len(ids),1) + assert all(weights==1) + + real_vals = perf.get_real_values(ids) + d = dc.data.NumpyDataset(X=np.ones_like(real_vals), y=real_vals, ids=ids, w=np.ones(len(ids))) + + num_classes = 2 + # input to to_one_hot needs to have the shape (N,) not (N,1) + pred_vals = dc.metrics.to_one_hot(d.y.reshape(len(d.y)), num_classes) + # This should have r2 of 1 + roc_auc_score = perf.accumulate_preds(pred_vals, ids) + assert roc_auc_score == 1 + # do a few more folds + roc_auc_score = perf.accumulate_preds(pred_vals, ids) + roc_auc_score = perf.accumulate_preds(pred_vals, ids) + + (res_ids, res_classes, res_probs, res_std) = perf.get_pred_values() + (roc_auc_mean, roc_auc_std) = perf.compute_perf_metrics() + + # std should be zero + assert all((res_std==np.zeros_like(res_std)).flatten()) + # probs should match predictions + assert all((res_probs==pred_vals.reshape(len(d.y), 1, num_classes)).flatten()) + # all predictions are correct + assert all(res_classes==real_vals) + # perfect score every time + assert roc_auc_mean==1 + assert roc_auc_std==0 + +def test_SimpleRegressionPerfData(): + res_dir, tmp_dskey = setup_paths() + + params = read_params( + make_relative_to_file('config_perf_data_SimpleRegressionPerfData.json'), + res_dir, tmp_dskey) + + # setup a pipeline that will be used to create performance data + pparams = parse.wrapper(params) + mp = model_pipeline.ModelPipeline(pparams) + mp.train_model() + + # creat performance data + perf = perf_data.create_perf_data(mp.params.prediction_type, + mp.data, 'train') + + assert isinstance(perf, 
perf_data.SimpleRegressionPerfData) + + real_vals = perf.get_real_values() + weights = perf.get_weights() + ids = np.array(range(len(real_vals))) # these are not used by SimpleRegressionPerfData + assert weights.shape == (len(ids),1) + assert all(weights==1) + + d = dc.data.NumpyDataset(X=np.ones_like(real_vals), y=real_vals, + ids=ids, w=np.ones(len(ids))) + + pred_vals = d.y + # This should have r2 of 1 ids are ignored + r2 = perf.accumulate_preds(pred_vals, ids) + assert r2 == 1 + + (res_ids, res_vals, _) = perf.get_pred_values() + (r2_mean, _) = perf.compute_perf_metrics() + + # the predicted values should equal the real values + assert all(real_vals == res_vals) + + # should be a perfect score + assert r2_mean == 1 + +def test_SimpleClassificationPerfData(): + res_dir, tmp_dskey = setup_paths() + + params = read_params( + make_relative_to_file('config_perf_data_SimpleClassificationPerfData.json'), + res_dir, tmp_dskey) + + # setup a pipeline that will be used to create performance data + pparams = parse.wrapper(params) + mp = model_pipeline.ModelPipeline(pparams) + mp.train_model() + + # creat performance data + perf = perf_data.create_perf_data(mp.params.prediction_type, + mp.data, 'train') + + assert isinstance(perf, perf_data.SimpleClassificationPerfData) + + ids = sorted(list(mp.data.train_valid_dsets[0][0].ids)) + weights = perf.get_weights() + real_vals = perf.get_real_values() + assert weights.shape == (len(ids),1) + assert all(weights==1) + + d = dc.data.NumpyDataset(X=np.ones_like(real_vals), y=real_vals, ids=ids, w=np.ones(len(ids))) + + num_classes = 2 + # input to to_one_hot needs to have the shape (N,) not (N,1) + pred_vals = dc.metrics.to_one_hot(d.y.reshape(len(d.y)), num_classes) + # This should have r2 of 1 + roc_auc_score = perf.accumulate_preds(pred_vals, ids=ids) + assert roc_auc_score == 1 + + (res_ids, res_classes, res_probs, _) = perf.get_pred_values() + (roc_auc_mean, _) = perf.compute_perf_metrics() + + # probs should match predictions + assert all((res_probs==pred_vals.reshape(len(d.y), 1, num_classes)).flatten()) + # all predictions are correct + assert all(res_classes==real_vals) + # perfect score every time + assert roc_auc_mean==1 + + +if __name__ == "__main__": + test_KFoldRegressionPerfDataMulti() + test_KFoldRegressionPerfData() + test_SimpleClassificationPerfData() + test_KFoldClassificationPerfData() + test_SimpleRegressionPerfData() \ No newline at end of file From 78f7e35fa7cf6cabae53884b531f0cee38963e36 Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Thu, 12 Dec 2024 15:35:14 -0800 Subject: [PATCH 16/30] Test to make sure transformers are fit correctly on training data only --- .../jsons/balancing_transformer.json | 29 ++ .../jsons/wo_balancing_transformer.json | 27 ++ .../test_balancing_transformer.py | 265 ++++++++---------- .../test/integrative/make_test_datasets.py | 156 +++++++++++ atomsci/ddm/test/unit/test_transformers.py | 48 ++++ 5 files changed, 376 insertions(+), 149 deletions(-) create mode 100644 atomsci/ddm/test/integrative/balancing_trans/jsons/balancing_transformer.json create mode 100644 atomsci/ddm/test/integrative/balancing_trans/jsons/wo_balancing_transformer.json create mode 100644 atomsci/ddm/test/integrative/make_test_datasets.py diff --git a/atomsci/ddm/test/integrative/balancing_trans/jsons/balancing_transformer.json b/atomsci/ddm/test/integrative/balancing_trans/jsons/balancing_transformer.json new file mode 100644 index 00000000..3a9b127f --- /dev/null +++ 
b/atomsci/ddm/test/integrative/balancing_trans/jsons/balancing_transformer.json @@ -0,0 +1,29 @@ +{ + "dataset_key" : "replaced", + "datastore" : "False", + "uncertainty": "False", + "splitter": "scaffold", + "split_valid_frac": "0.20", + "split_test_frac": "0.20", + "split_strategy": "train_valid_test", + "prediction_type": "classification", + "model_choice_score_type": "roc_auc", + "response_cols" : "active", + "id_col": "compound_id", + "smiles_col" : "rdkit_smiles", + "result_dir": "replaced", + "system": "LC", + "transformers": "True", + "model_type": "NN", + "featurizer": "computed_descriptors", + "descriptor_type": "rdkit_raw", + "weight_transform_type": "balancing", + "learning_rate": ".0007", + "layer_sizes": "512,128", + "dropouts": "0.3,0.3", + "save_results": "False", + "max_epochs": "2", + "early_stopping_patience": "2", + "verbose": "False", + "seed":"0" + } \ No newline at end of file diff --git a/atomsci/ddm/test/integrative/balancing_trans/jsons/wo_balancing_transformer.json b/atomsci/ddm/test/integrative/balancing_trans/jsons/wo_balancing_transformer.json new file mode 100644 index 00000000..00c5160c --- /dev/null +++ b/atomsci/ddm/test/integrative/balancing_trans/jsons/wo_balancing_transformer.json @@ -0,0 +1,27 @@ +{ + "dataset_key" : "replaced", + "datastore" : "False", + "uncertainty": "False", + "splitter": "scaffold", + "split_valid_frac": "0.20", + "split_test_frac": "0.20", + "split_strategy": "train_valid_test", + "prediction_type": "classification", + "model_choice_score_type": "roc_auc", + "response_cols" : "active", + "id_col": "compound_id", + "smiles_col" : "rdkit_smiles", + "result_dir": "replaced", + "system": "LC", + "transformers": "True", + "model_type": "NN", + "featurizer": "ecfp", + "learning_rate": ".0007", + "layer_sizes": "512,128", + "dropouts": "0.3,0.3", + "save_results": "False", + "max_epochs": "2", + "early_stopping_patience": "2", + "verbose": "False", + "seed":"0" + } \ No newline at end of file diff --git a/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py b/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py index 9e5cd0eb..92c52687 100644 --- a/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py +++ b/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py @@ -1,168 +1,135 @@ -import pandas as pd import tempfile import atomsci.ddm.pipeline.parameter_parser as parse import atomsci.ddm.pipeline.model_pipeline as mp +import atomsci.ddm.pipeline.transformations as trans +import numpy as np +import os +import json + +import sys +sys.path.append(os.path.join(os.path.dirname(__file__), '..')) +import make_test_datasets + import logging logger = logging.getLogger(__name__) -nreps = 10 -metrics = [] -vals = [] -balanced = [] -subset = [] - def test_balancing_transformer(): - dset_key = '../../test_datasets/MRP3_dataset.csv' - dset_df = pd.read_csv(dset_key) + dset_key = make_relative_to_file('../../test_datasets/MRP3_dataset.csv') res_dir = tempfile.mkdtemp() - split_uuid = create_scaffold_split(dset_key, res_dir) - - # train the model without the balancing - train_model_wo_balan(dset_key, split_uuid, res_dir) - # train the model with the balancing parameter - train_model_w_balan(dset_key, split_uuid, res_dir) - - metrics_df = pd.DataFrame(dict(subset=subset, balanced=balanced, metric=metrics, val=vals)) - - # check the recall_score - rec_df = metrics_df[metrics_df.metric == 'recall_score'] - - not_balanced_series = rec_df[(rec_df.balanced == 
'no')].groupby("subset").val.mean() - balanced_series = rec_df[(rec_df.balanced == 'yes')].groupby("subset").val.mean() - - assert((balanced_series['test'] > not_balanced_series['test']) & (balanced_series['valid'] > not_balanced_series['valid']) ) - -def create_scaffold_split(dset_key, res_dir): - params = { - "dataset_key" : dset_key, - "datastore" : "False", - "uncertainty": "False", - "splitter": "scaffold", - "split_valid_frac": "0.1", - "split_test_frac": "0.1", - "split_strategy": "train_valid_test", - "previously_split": "False", - "prediction_type": "classification", - "model_choice_score_type": "roc_auc", - "response_cols" : "active", - "id_col": "compound_id", - "smiles_col" : "rdkit_smiles", - "result_dir": res_dir, - "system": "LC", - "transformers": "True", - "model_type": "NN", - "featurizer": "computed_descriptors", - "descriptor_type": "rdkit_raw", - "learning_rate": ".0007", - "layer_sizes": "512,128", - "dropouts": "0.3,0.3", - "save_results": "False", - "max_epochs": "500", - "early_stopping_patience": "50", - "verbose": "False" - } + balanced_params = params_w_balan(dset_key, res_dir) + balanced_weights = make_pipeline_and_get_weights(balanced_params) + (major_weight, minor_weight), (major_count, minor_count) = np.unique(balanced_weights, return_counts=True) + assert major_weight < minor_weight + assert major_count > minor_count + + nonbalanced_params = params_wo_balan(dset_key, res_dir) + nonbalanced_weights = make_pipeline_and_get_weights(nonbalanced_params) + (weight,), (count,) = np.unique(nonbalanced_weights, return_counts=True) + assert weight == 1 + +def test_all_transformers(): + res_dir = tempfile.mkdtemp() + dskey = os.path.join(res_dir, 'special_test_dset.csv') + params = params_w_balan(dskey, res_dir) + make_test_datasets.make_test_dataset_and_split(dskey, params['descriptor_type']) + + params['previously_featurized'] = True + params['previously_split'] = True + params['splitter'] = 'index' + params['split_uuid'] = 'testsplit' + params['split_strategy'] = 'train_valid_test' + params['response_cols'] = ['class'] + + # check that the transformers are correct + model_pipeline = make_pipeline(params) + assert(len(model_pipeline.model_wrapper.transformers[0])==0) + + assert(len(model_pipeline.model_wrapper.transformers_x[0])==1) + transformer_x = model_pipeline.model_wrapper.transformers_x[0][0] + assert(isinstance(transformer_x, trans.NormalizationTransformerMissingData)) + + assert(len(model_pipeline.model_wrapper.transformers_w[0])==1) + assert(isinstance(model_pipeline.model_wrapper.transformers_w[0][0], trans.BalancingTransformer)) + + # check that the transforms are correct + train_dset = model_pipeline.data.train_valid_dsets[0][0] + trans_train_dset = model_pipeline.model_wrapper.transform_dataset(train_dset, fold=0) + + # mean should be nearly 0 + assert abs(np.mean(trans_train_dset.X) - 0) < 1e-4 + np.testing.assert_array_almost_equal(transformer_x.X_means, np.zeros_like(transformer_x.X_means)) + # std should be nearly 2 + assert abs(np.std(trans_train_dset.X) - 1) < 1e-4 + np.testing.assert_array_almost_equal(transformer_x.X_stds, np.ones_like(transformer_x.X_stds)*2) + # there is an 80/20 imbalance in classification + weights = trans_train_dset.w + (weight1, weight2), (count1, count2) = np.unique(weights, return_counts=True) + assert (weight1*count1 - weight2*count2) < 1e-3 + + # validation set has a different distribution + valid_dset = model_pipeline.data.train_valid_dsets[0][1] + trans_valid_dset = 
model_pipeline.model_wrapper.transform_dataset(valid_dset, fold=0)
+
+    # untransformed mean is 10; expected transformed mean is (10 - 0) / 2 = 5
+    assert abs(np.mean(trans_valid_dset.X) - 5) < 1e-4
+    # untransformed std is 5; expected transformed std is 5 / 2 = 2.5
+    assert abs(np.std(trans_valid_dset.X) - 2.5) < 1e-4
+    # validation has a 50/50 class split; the majority-class weight times 4 should match the minority-class weight
+    valid_weights = trans_valid_dset.w
+    (valid_weight1, valid_weight2), (valid_count1, valid_count2) = np.unique(valid_weights, return_counts=True)
+    assert (valid_weight1*valid_count1*4 - valid_weight2*valid_count2) < 1e-4
+
+
+def make_pipeline(params):
     pparams = parse.wrapper(params)
-    MP = mp.ModelPipeline(pparams)
+    model_pipeline = mp.ModelPipeline(pparams)
+    model_pipeline.train_model()
+
+    return model_pipeline
+
+def make_pipeline_and_get_weights(params):
+    model_pipeline = make_pipeline(params)

-    split_uuid = MP.split_dataset()
-    return split_uuid
+    print(model_pipeline.model_wrapper.transformers_w)
+    print(np.unique(model_pipeline.data.train_valid_dsets[0][0].y, return_counts=True))
+    return model_pipeline.data.train_valid_dsets[0][0].w

-def train_model_wo_balan(dset_key, split_uuid, res_dir):
+def make_relative_to_file(relative_path):
+    script_path = os.path.dirname(os.path.realpath(__file__))
+    result = os.path.join(script_path, relative_path)
+
+    return result
+
+def read_params(json_file, tmp_dskey, res_dir):
+    with open(json_file, 'r') as file:
+        params = json.load(file)
+    params['result_dir'] = res_dir
+    params['dataset_key'] = tmp_dskey
+    return params
+
+def params_wo_balan(dset_key, res_dir):
     # Parameters for training a classification model without balancing weights
-    params = {
-    "dataset_key" : dset_key,
-    "datastore" : "False",
-    "uncertainty": "False",
-    "splitter": "scaffold",
-    "split_valid_frac": "0.1",
-    "split_test_frac": "0.1",
-    "split_strategy": "train_valid_test",
-    "previously_split": "True",
-    "split_uuid": split_uuid,
-    "prediction_type": "classification",
-    "model_choice_score_type": "roc_auc",
-    "response_cols" : "active",
-    "id_col": "compound_id",
-    "smiles_col" : "rdkit_smiles",
-    "result_dir": res_dir,
-    "system": "LC",
-    "transformers": "True",
-    "model_type": "NN",
-    "featurizer": "computed_descriptors",
-    "descriptor_type": "rdkit_raw",
-    "learning_rate": ".0007",
-    "layer_sizes": "512,128",
-    "dropouts": "0.3,0.3",
-    "save_results": "False",
-    "max_epochs": "500",
-    "early_stopping_patience": "50",
-    "verbose": "False"
-    }
-
-    for i in range(nreps):
-        pparams = parse.wrapper(params)
-        MP = mp.ModelPipeline(pparams)
-        MP.train_model()
-        wrapper = MP.model_wrapper
-
-        for ss in ['valid', 'test']:
-            metvals = wrapper.get_pred_results(ss, 'best')
-            for metric in ['roc_auc_score', 'prc_auc_score', 'cross_entropy', 'precision', 'recall_score', 'npv', 'accuracy_score', 'bal_accuracy', 'kappa','matthews_cc']:
-                subset.append(ss)
-                balanced.append('no')
-                metrics.append(metric)
-                vals.append(metvals[metric])
+    params = read_params(
+        make_relative_to_file('jsons/wo_balancing_transformer.json'),
+        dset_key,
+        res_dir)
+
+    return params
+
+def params_w_balan(dset_key, res_dir):
     # Parameters for training a model on the same dataset with balancing weights
-    params = {
-    "dataset_key" : dset_key,
-    "datastore" : "False",
-    "uncertainty": "False",
-    "splitter": "scaffold",
-    "split_valid_frac": "0.1",
-    "split_test_frac": "0.1",
-    "split_strategy":
"train_valid_test", - "previously_split": "True", - "split_uuid": split_uuid, - "prediction_type": "classification", - "model_choice_score_type": "roc_auc", - "response_cols" : "active", - "id_col": "compound_id", - "smiles_col" : "rdkit_smiles", - "result_dir": res_dir, - "system": "LC", - "transformers": "True", - "model_type": "NN", - "featurizer": "computed_descriptors", - "descriptor_type": "rdkit_raw", - "weight_transform_type": "balancing", - "learning_rate": ".0007", - "layer_sizes": "512,128", - "dropouts": "0.3,0.3", - "save_results": "False", - "max_epochs": "500", - "early_stopping_patience": "50", - "verbose": "False" - } - - for i in range(nreps): - pparams = parse.wrapper(params) - MP = mp.ModelPipeline(pparams) - MP.train_model() - wrapper = MP.model_wrapper - - for ss in ['valid', 'test']: - metvals = wrapper.get_pred_results(ss, 'best') - for metric in ['roc_auc_score', 'prc_auc_score', 'cross_entropy', 'precision', 'recall_score', 'npv', 'accuracy_score', 'bal_accuracy', 'kappa','matthews_cc']: - subset.append(ss) - balanced.append('yes') - metrics.append(metric) - vals.append(metvals[metric]) + params = read_params( + make_relative_to_file('jsons/balancing_transformer.json'), + dset_key, + res_dir + ) + + return params if __name__ == '__main__': - test_balancing_transformer() \ No newline at end of file + test_all_transformers() + #test_balancing_transformer() \ No newline at end of file diff --git a/atomsci/ddm/test/integrative/make_test_datasets.py b/atomsci/ddm/test/integrative/make_test_datasets.py new file mode 100644 index 00000000..27cae79c --- /dev/null +++ b/atomsci/ddm/test/integrative/make_test_datasets.py @@ -0,0 +1,156 @@ +import os +import numpy as np +import pandas as pd + +def get_absolute_path(relative_path): + """ + Calculate the absolute path given a path relative to the location of this file. + + Parameters: + relative_path (str): The relative path to convert. + + Returns: + str: The absolute path. + """ + # Get the directory of the current file + current_dir = os.path.dirname(os.path.abspath(__file__)) + # Join the current directory with the relative path + absolute_path = os.path.join(current_dir, relative_path) + return absolute_path + +def get_features(feature_type): + desc_spec_file = get_absolute_path('../../data/descriptor_sets_sources_by_descr_type.csv') + desc_spec_df = pd.read_csv(desc_spec_file, index_col=False) + + rows = desc_spec_df[desc_spec_df['descr_type']==feature_type] + assert len(rows)==1 + descriptors = rows['descriptors'].values[0].split(';') + + return descriptors + +def rescale(array, desired_mean, desired_std): + """ + Rescale the input array to have the desired mean and standard deviation. + + Parameters: + array (np.ndarray): Input array to be rescaled. + desired_mean (float): Desired mean of the rescaled array. + desired_std (float): Desired standard deviation of the rescaled array. + + Returns: + np.ndarray: Rescaled array with the desired mean and standard deviation. + """ + current_mean = np.mean(array) + current_std = np.std(array) + + # Rescale the array + rescaled_array = (array - current_mean) / current_std * desired_std + desired_mean + + return rescaled_array + +def make_test_dataset(features, num_train=50000, num_test=10000, num_valid=10000): + """ + Create a test dataset with specified features and splits. + + Parameters: + features (list): List of column names for the features. + num_train (int): Number of training samples. + num_test (int): Number of test samples. 
+ num_valid (int): Number of validation samples. + + Returns: + pd.DataFrame: DataFrame with the specified features and an additional 'id' column. + np.ndarray: IDs for the training split. + np.ndarray: IDs for the test split. + np.ndarray: IDs for the validation split. + """ + # Create an empty DataFrame + df = pd.DataFrame() + + # Generate IDs + total_samples = num_train + num_test + num_valid + compounds_ids = np.arange(total_samples) + + # Populate feature columns with numbers from different normal distributions + dat = {'compound_id':compounds_ids} + for feature in features: + train_data = np.random.normal(loc=0, scale=1, size=num_train) + train_data = rescale(train_data, 0, 2) + valid_data = np.random.normal(loc=0, scale=1, size=num_valid) + valid_data = rescale(valid_data, 10, 5) + test_data = np.random.normal(loc=0, scale=1, size=num_test) + test_data = rescale(test_data, 100, 10) + dat[feature] = np.concatenate([train_data, test_data, valid_data]) + + # Add response columns with different means + dat['response_1'] = np.random.normal(loc=0, scale=1, size=total_samples) + dat['response_2'] = np.random.normal(loc=100, scale=1, size=total_samples) + + # Add class column + class_labels = np.zeros(total_samples) + dat['class'] = class_labels + + df = pd.DataFrame(data=dat) + + # Ensure each subset has the same ratio of 0s and 1s in the class column + train_class = np.concatenate([ + np.zeros(int(num_train * 0.8)), + np.ones(int(num_train * 0.2)) + ]) + np.random.shuffle(train_class) + df.loc[:num_train-1, 'class'] = train_class + + valid_class = np.concatenate([ + np.zeros(int(num_valid * 0.5)), + np.ones(int(num_valid * 0.5)) + ]) + np.random.shuffle(valid_class) + df.loc[num_train:num_train+num_valid-1, 'class'] = valid_class + + test_class = np.concatenate([ + np.zeros(int(num_test * 0.5)), + np.ones(int(num_test * 0.5)) + ]) + np.random.shuffle(test_class) + df.loc[num_train+num_valid:, 'class'] = test_class + + # Split IDs + train_ids = df['compound_id'][:num_train].values + test_ids = df['compound_id'][num_train:num_train + num_test].values + valid_ids = df['compound_id'][num_train + num_test:].values + + return df, train_ids, valid_ids, test_ids + +def make_split_df(train_ids, valid_ids, test_ids): + """ + Create a DataFrame with train, valid, and test IDs, and additional columns for subset and fold. + + Parameters: + train_ids (list or np.ndarray): List of training IDs. + valid_ids (list or np.ndarray): List of validation IDs. + test_ids (list or np.ndarray): List of test IDs. + + Returns: + pd.DataFrame: DataFrame with 'id', 'subset', and 'fold' columns. 
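+
+    Example (illustrative sketch only; note the code below names the id column 'cmpd_id'):
+        split_df = make_split_df([0, 1], [2], [3])
+        # -> 4 rows: cmpd_id 0..3, subset ['train', 'train', 'valid', 'test'], fold 0 everywhere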
+ """ + # Create DataFrames for each subset + train_df = pd.DataFrame({'cmpd_id': train_ids, 'subset': 'train', 'fold': 0}) + valid_df = pd.DataFrame({'cmpd_id': valid_ids, 'subset': 'valid', 'fold': 0}) + test_df = pd.DataFrame({'cmpd_id': test_ids, 'subset': 'test', 'fold': 0}) + + # Concatenate the DataFrames + split_df = pd.concat([train_df, valid_df, test_df], ignore_index=True) + + return split_df + +def make_test_dataset_and_split(dataset_key, feature_types): + features = get_features(feature_types) + df, train_ids, valid_ids, test_ids = make_test_dataset(features) + + df['rdkit_smiles'] = ['CCCC'] * len(df) + split_df = make_split_df(train_ids, valid_ids, test_ids) + + df.to_csv(dataset_key, index=False) + + split_filename = os.path.splitext(dataset_key)[0]+'_train_valid_test_index_testsplit.csv' + split_df.to_csv(split_filename, index=False) \ No newline at end of file diff --git a/atomsci/ddm/test/unit/test_transformers.py b/atomsci/ddm/test/unit/test_transformers.py index f5599bdf..02e6b87f 100644 --- a/atomsci/ddm/test/unit/test_transformers.py +++ b/atomsci/ddm/test/unit/test_transformers.py @@ -43,4 +43,52 @@ def test_one_task_no_missing_values(): np.testing.assert_array_almost_equal(y_means, [3.0]) np.testing.assert_array_almost_equal(y_stds, [1.632993]) +def test_normalization_transformer_missing_data(): + # Create a mock dataset + X = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]) + y = np.array([[1.0, np.nan], [3.0, 4.0], [5.0, 6.0]]) + w = np.array([[1, 0], [1, 1], [1, 1]]) + ids = np.array(range(len(y))) + dataset = NumpyDataset(X=X, y=y, w=w, ids=ids) + + # Initialize the transformer + transformer = trans.NormalizationTransformerMissingData(transform_X=False, transform_y=True, dataset=dataset) + + # Check the means and standard deviations + expected_y_means = np.array([3.0, 5.0]) + expected_y_stds = np.array([1.632993, 1.0]) + np.testing.assert_array_almost_equal(transformer.y_means, expected_y_means) + np.testing.assert_array_almost_equal(transformer.y_stds, expected_y_stds) + + # Apply the transformation + transformed_dataset = transformer.transform(dataset) + + # Check the transformed values + # np.nan is replaced with 0 + expected_transformed_y = np.array([[-1.224745, 0], [0.0, -1.0], [1.224745, 1.0]]) + np.testing.assert_array_almost_equal(transformed_dataset.y, expected_transformed_y, decimal=6) + +def test_normalization_transformer_missing_data_transform_X(): + # Create a mock dataset + X = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]) + y = np.array([[1.0], [3.0], [5.0]]) + w = np.array([[1], [1], [1]]) + ids = np.array(range(len(y))) + dataset = NumpyDataset(X=X, y=y, w=w, ids=ids) + + # Initialize the transformer with transform_X=True + transformer = trans.NormalizationTransformerMissingData(transform_X=True, dataset=dataset) + + # Check the means and standard deviations + expected_X_means = np.array([3.0, 4.0]) + expected_X_stds = np.array([1.632993, 1.632993]) + np.testing.assert_array_almost_equal(transformer.X_means, expected_X_means) + np.testing.assert_array_almost_equal(transformer.X_stds, expected_X_stds) + + # Apply the transformation + transformed_dataset = transformer.transform(dataset) + + # Check the transformed values + expected_transformed_X = (X - expected_X_means) / expected_X_stds + np.testing.assert_array_almost_equal(transformed_dataset.X, expected_transformed_X, decimal=6) From 804a62b5840884977d350455c76875bb84708708 Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Thu, 12 Dec 2024 15:46:18 -0800 Subject: [PATCH 17/30] Added check to 
make sure that every requested id in the subset has a response value found --- atomsci/ddm/pipeline/model_datasets.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/atomsci/ddm/pipeline/model_datasets.py b/atomsci/ddm/pipeline/model_datasets.py index b01120ff..d3e9dbcc 100644 --- a/atomsci/ddm/pipeline/model_datasets.py +++ b/atomsci/ddm/pipeline/model_datasets.py @@ -709,6 +709,8 @@ def get_subset_responses_and_weights(self, subset): for id, y in zip(self.untransformed_dataset.ids, self.untransformed_dataset.y): if id in dataset_ids: response_vals[id] = y + # we need to double check that all responses_vals we asked for were found + assert len(response_vals) == len(dataset_ids) w = dataset.w weights = dict([(id, w[i,:]) for i, id in enumerate(dataset.ids)]) From 23bd84fc66b4dcd9c236cef07ebd39b83b6aeaa4 Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Thu, 12 Dec 2024 15:53:19 -0800 Subject: [PATCH 18/30] call get_untransformed_responses instead --- atomsci/ddm/pipeline/model_datasets.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/atomsci/ddm/pipeline/model_datasets.py b/atomsci/ddm/pipeline/model_datasets.py index d3e9dbcc..bf5babca 100644 --- a/atomsci/ddm/pipeline/model_datasets.py +++ b/atomsci/ddm/pipeline/model_datasets.py @@ -704,13 +704,7 @@ def get_subset_responses_and_weights(self, subset): else: raise ValueError('Unknown dataset subset type "%s"' % subset) - response_vals = dict() - dataset_ids = set(dataset.ids) - for id, y in zip(self.untransformed_dataset.ids, self.untransformed_dataset.y): - if id in dataset_ids: - response_vals[id] = y - # we need to double check that all responses_vals we asked for were found - assert len(response_vals) == len(dataset_ids) + response_vals = self.get_untransformed_responses(dataset.ids) w = dataset.w weights = dict([(id, w[i,:]) for i, id in enumerate(dataset.ids)]) @@ -729,6 +723,9 @@ def get_untransformed_responses(self, ids): for i, id in enumerate(ids): response_vals[i] = response_dict[id] + # we need to double check that all responses_vals we asked for were found + assert len(response_vals) == len(set(ids)) + return response_vals # ************************************************************************************* From 61c18fd97fa1d51d969cd00edb49fca3b0d2506d Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Thu, 12 Dec 2024 16:01:43 -0800 Subject: [PATCH 19/30] Cache the untransformed response dict --- atomsci/ddm/pipeline/model_datasets.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/atomsci/ddm/pipeline/model_datasets.py b/atomsci/ddm/pipeline/model_datasets.py index bf5babca..7d528f9e 100644 --- a/atomsci/ddm/pipeline/model_datasets.py +++ b/atomsci/ddm/pipeline/model_datasets.py @@ -318,6 +318,9 @@ def __init__(self, params, featurization): self.subset_response_dict = {} # Cache for subset-specific response values matched to IDs, used by k-fold CV code self.subset_weight_dict = {} + # Cache for untransformed response values matched to IDs, used by k-fold CV code + self.untransformed_response_dict = {} + # **************************************************************************************** def load_full_dataset(self): @@ -718,10 +721,11 @@ def get_untransformed_responses(self, ids): """ Returns a numpy array of untransformed response values """ response_vals = np.zeros((len(ids), self.untransformed_dataset.y.shape[1])) - response_dict = dict([(id, y) for id, y in zip(self.untransformed_dataset.ids, self.untransformed_dataset.y)]) + if 
len(self.untransformed_response_dict) == 0: + self.untransformed_response_dict = dict(zip(self.untransformed_dataset.ids, self.untransformed_dataset.y)) for i, id in enumerate(ids): - response_vals[i] = response_dict[id] + response_vals[i] = self.untransformed_response_dict[id] # we need to double check that all responses_vals we asked for were found assert len(response_vals) == len(set(ids)) From 9c8abc333979fb85a3884b0f95d900a96435f366 Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Thu, 12 Dec 2024 16:09:47 -0800 Subject: [PATCH 20/30] Should not have to pass a 'final' argument --- atomsci/ddm/pipeline/perf_plots.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/atomsci/ddm/pipeline/perf_plots.py b/atomsci/ddm/pipeline/perf_plots.py index 6dcf592d..6fcdcb04 100644 --- a/atomsci/ddm/pipeline/perf_plots.py +++ b/atomsci/ddm/pipeline/perf_plots.py @@ -165,8 +165,8 @@ def plot_pred_vs_actual(model, epoch_label='best', threshold=None, error_bars=Fa for s, subset in enumerate(subsets): perf_data = wrapper.get_perf_data(subset, epoch_label) pred_results = perf_data.get_prediction_results() - y_actual = perf_data.get_real_values('final') - ids, y_pred, y_std = perf_data.get_pred_values('final') + y_actual = perf_data.get_real_values() + ids, y_pred, y_std = perf_data.get_pred_values() r2 = pred_results['r2_score'] if perf_data.num_tasks > 1: r2_scores = pred_results['task_r2_scores'] From a7ed96a4be26c4a6883bd88a3ae71e9ef557032a Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Thu, 12 Dec 2024 16:19:41 -0800 Subject: [PATCH 21/30] Weights and y should be the same shape --- atomsci/ddm/test/unit/test_transformers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/atomsci/ddm/test/unit/test_transformers.py b/atomsci/ddm/test/unit/test_transformers.py index 02e6b87f..46a3fbd3 100644 --- a/atomsci/ddm/test/unit/test_transformers.py +++ b/atomsci/ddm/test/unit/test_transformers.py @@ -35,7 +35,7 @@ def test_all_missing_values(): def test_one_task_no_missing_values(): y = np.array([[1.0], [3.0], [5.0]]) - w = np.array([[1, 1], [1, 1], [1, 1]]) + w = np.array([[1], [1], [1]]) x = np.ones_like(y) ids = np.array(range(len(y))) dataset = NumpyDataset(X=x, y=y, w=w, ids=ids) From b0940ad7ecb7cd0453c488e8ec56a441e9453bef Mon Sep 17 00:00:00 2001 From: Amanda Paulson Date: Thu, 12 Dec 2024 17:06:44 -0800 Subject: [PATCH 22/30] dataset transformation moved into generate_predictions() --- atomsci/ddm/pipeline/model_pipeline.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/atomsci/ddm/pipeline/model_pipeline.py b/atomsci/ddm/pipeline/model_pipeline.py index 8cb3adc9..7d49092a 100644 --- a/atomsci/ddm/pipeline/model_pipeline.py +++ b/atomsci/ddm/pipeline/model_pipeline.py @@ -856,8 +856,6 @@ def predict_full_dataset(self, dset_df, is_featurized=False, contains_responses= raise Exception("response_cols missing from model params") # Get features for each compound and construct a DeepChem Dataset from them self.data.get_featurized_data(dset_df, is_featurized) - # Transform the features and responses if needed - self.data.dataset = self.model_wrapper.transform_dataset(self.data.dataset, fold='final') # Note that at this point, the dataset may contain fewer rows than the input. Typically this happens because # of invalid SMILES strings. 
Remove any rows from the input dataframe corresponding to SMILES strings that were From a7eb892e8194de5200d12dab873449affcc8e7a4 Mon Sep 17 00:00:00 2001 From: Amanda Paulson Date: Thu, 12 Dec 2024 17:07:58 -0800 Subject: [PATCH 23/30] zero out transformed values that are larger than 1e30 --- atomsci/ddm/pipeline/transformations.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/atomsci/ddm/pipeline/transformations.py b/atomsci/ddm/pipeline/transformations.py index 2e7c7420..8abc9712 100644 --- a/atomsci/ddm/pipeline/transformations.py +++ b/atomsci/ddm/pipeline/transformations.py @@ -289,6 +289,8 @@ def transform_array(self, X, y, w, ids): X = np.nan_to_num((X - self.X_means) * X_weight / self.X_stds) else: X = np.nan_to_num(X * X_weight / self.X_stds) + # zero out large values, especially for out of range test data + X[np.abs(X) > 1e30] = 0 if self.transform_y: if not hasattr(self, 'move_mean') or self.move_mean: y = np.nan_to_num((y - self.y_means) / self.y_stds) From d1b25d8e71b0421596f274aa13c6eac5fac1dbff Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Mon, 16 Dec 2024 09:44:13 -0800 Subject: [PATCH 24/30] get_untransformed_responses returns an array, not a dictionary --- atomsci/ddm/pipeline/model_datasets.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/atomsci/ddm/pipeline/model_datasets.py b/atomsci/ddm/pipeline/model_datasets.py index 7d528f9e..e89b45bb 100644 --- a/atomsci/ddm/pipeline/model_datasets.py +++ b/atomsci/ddm/pipeline/model_datasets.py @@ -707,7 +707,7 @@ def get_subset_responses_and_weights(self, subset): else: raise ValueError('Unknown dataset subset type "%s"' % subset) - response_vals = self.get_untransformed_responses(dataset.ids) + response_vals = dict(zip(dataset.ids, self.get_untransformed_responses(dataset.ids))) w = dataset.w weights = dict([(id, w[i,:]) for i, id in enumerate(dataset.ids)]) @@ -727,9 +727,6 @@ def get_untransformed_responses(self, ids): for i, id in enumerate(ids): response_vals[i] = self.untransformed_response_dict[id] - # we need to double check that all responses_vals we asked for were found - assert len(response_vals) == len(set(ids)) - return response_vals # ************************************************************************************* From 74c19537502eb14842b54496178cd89ba461080b Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Mon, 16 Dec 2024 10:38:34 -0800 Subject: [PATCH 25/30] sped up and updated the test --- .../balancing_trans/jsons/balancing_transformer.json | 5 ++--- .../jsons/wo_balancing_transformer.json | 2 +- .../balancing_trans/test_balancing_transformer.py | 11 ++++++----- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/atomsci/ddm/test/integrative/balancing_trans/jsons/balancing_transformer.json b/atomsci/ddm/test/integrative/balancing_trans/jsons/balancing_transformer.json index 3a9b127f..93eabb47 100644 --- a/atomsci/ddm/test/integrative/balancing_trans/jsons/balancing_transformer.json +++ b/atomsci/ddm/test/integrative/balancing_trans/jsons/balancing_transformer.json @@ -15,11 +15,10 @@ "system": "LC", "transformers": "True", "model_type": "NN", - "featurizer": "computed_descriptors", - "descriptor_type": "rdkit_raw", + "featurizer": "ecfp", "weight_transform_type": "balancing", "learning_rate": ".0007", - "layer_sizes": "512,128", + "layer_sizes": "20,10", "dropouts": "0.3,0.3", "save_results": "False", "max_epochs": "2", diff --git a/atomsci/ddm/test/integrative/balancing_trans/jsons/wo_balancing_transformer.json 
b/atomsci/ddm/test/integrative/balancing_trans/jsons/wo_balancing_transformer.json index 00c5160c..51cceea9 100644 --- a/atomsci/ddm/test/integrative/balancing_trans/jsons/wo_balancing_transformer.json +++ b/atomsci/ddm/test/integrative/balancing_trans/jsons/wo_balancing_transformer.json @@ -17,7 +17,7 @@ "model_type": "NN", "featurizer": "ecfp", "learning_rate": ".0007", - "layer_sizes": "512,128", + "layer_sizes": "20,10", "dropouts": "0.3,0.3", "save_results": "False", "max_epochs": "2", diff --git a/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py b/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py index 92c52687..b26b7b7c 100644 --- a/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py +++ b/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py @@ -93,10 +93,11 @@ def make_pipeline(params): def make_pipeline_and_get_weights(params): model_pipeline = make_pipeline(params) + model_wrapper = model_pipeline.model_wrapper + train_dataset = model_pipeline.data.train_valid_dsets[0][0] + transformed_data = model_wrapper.transform_dataset(train_dataset, fold=0) - print(model_pipeline.model_wrapper.transformers_w) - print(np.unique(model_pipeline.data.train_valid_dsets[0][0].y, return_counts=True)) - return model_pipeline.data.train_valid_dsets[0][0].w + return transformed_data.w def make_relative_to_file(relative_path): script_path = os.path.dirname(os.path.realpath(__file__)) @@ -131,5 +132,5 @@ def params_w_balan(dset_key, res_dir): return params if __name__ == '__main__': - test_all_transformers() - #test_balancing_transformer() \ No newline at end of file + #test_all_transformers() + test_balancing_transformer() \ No newline at end of file From 58c454db0d5560ee0121b74846986f5cf2b05d95 Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Mon, 16 Dec 2024 10:54:58 -0800 Subject: [PATCH 26/30] Updated transformer test to correctly test the standard deviation and to use a new json file specific to that test --- .../balancing_trans/jsons/all_transforms.json | 29 +++++++++++++++++++ .../test_balancing_transformer.py | 10 +++++-- 2 files changed, 36 insertions(+), 3 deletions(-) create mode 100644 atomsci/ddm/test/integrative/balancing_trans/jsons/all_transforms.json diff --git a/atomsci/ddm/test/integrative/balancing_trans/jsons/all_transforms.json b/atomsci/ddm/test/integrative/balancing_trans/jsons/all_transforms.json new file mode 100644 index 00000000..40b7c5a6 --- /dev/null +++ b/atomsci/ddm/test/integrative/balancing_trans/jsons/all_transforms.json @@ -0,0 +1,29 @@ +{ + "dataset_key" : "replaced", + "datastore" : "False", + "uncertainty": "False", + "splitter": "scaffold", + "split_valid_frac": "0.20", + "split_test_frac": "0.20", + "split_strategy": "train_valid_test", + "prediction_type": "classification", + "model_choice_score_type": "roc_auc", + "response_cols" : "active", + "id_col": "compound_id", + "smiles_col" : "rdkit_smiles", + "result_dir": "replaced", + "system": "LC", + "transformers": "True", + "model_type": "NN", + "featurizer": "computed_descriptors", + "descriptor_type": "rdkit_raw", + "weight_transform_type": "balancing", + "learning_rate": ".0007", + "layer_sizes": "20,10", + "dropouts": "0.3,0.3", + "save_results": "False", + "max_epochs": "2", + "early_stopping_patience": "2", + "verbose": "False", + "seed":"0" + } \ No newline at end of file diff --git a/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py 
b/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py index b26b7b7c..31114379 100644 --- a/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py +++ b/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py @@ -34,7 +34,11 @@ def test_balancing_transformer(): def test_all_transformers(): res_dir = tempfile.mkdtemp() dskey = os.path.join(res_dir, 'special_test_dset.csv') - params = params_w_balan(dskey, res_dir) + params = read_params( + make_relative_to_file('jsons/all_transforms.json'), + dskey, + res_dir + ) make_test_datasets.make_test_dataset_and_split(dskey, params['descriptor_type']) params['previously_featurized'] = True @@ -77,7 +81,7 @@ def test_all_transformers(): # untransformed mean is 10 expected transformed mean is (10 - 0) / 2 assert abs(np.mean(trans_valid_dset.X) - 5) < 1e-4 # untransformed std is 5 expected transformed std is 5/2 - assert abs(np.std(trans_valid_dset.X) - (2.5)) + assert abs(np.std(trans_valid_dset.X) - (2.5)) < 1e-4 # validation has a 50/50 split. Majority class * 4 should equal oversampled minority class valid_weights = trans_valid_dset.w (valid_weight1, valid_weight2), (valid_count1, valid_count2) = np.unique(valid_weights, return_counts=True) @@ -132,5 +136,5 @@ def params_w_balan(dset_key, res_dir): return params if __name__ == '__main__': - #test_all_transformers() + test_all_transformers() test_balancing_transformer() \ No newline at end of file From d17bfc4fc0466601a193f653c01bf62483cd6d15 Mon Sep 17 00:00:00 2001 From: Amanda Paulson Date: Wed, 18 Dec 2024 14:02:05 -0800 Subject: [PATCH 27/30] update large values to be capped at 1e30 --- atomsci/ddm/pipeline/transformations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/atomsci/ddm/pipeline/transformations.py b/atomsci/ddm/pipeline/transformations.py index 8abc9712..fd8bedaa 100644 --- a/atomsci/ddm/pipeline/transformations.py +++ b/atomsci/ddm/pipeline/transformations.py @@ -290,7 +290,7 @@ def transform_array(self, X, y, w, ids): else: X = np.nan_to_num(X * X_weight / self.X_stds) # zero out large values, especially for out of range test data - X[np.abs(X) > 1e30] = 0 + X[np.abs(X) > 1e30] = 1e30 if self.transform_y: if not hasattr(self, 'move_mean') or self.move_mean: y = np.nan_to_num((y - self.y_means) / self.y_stds) From d5b3b554823966c153f472952c411fa4d67e40ca Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Wed, 18 Dec 2024 16:47:12 -0800 Subject: [PATCH 28/30] Added a test for kfold cross validation transformers --- atomsci/ddm/pipeline/model_wrapper.py | 2 +- .../jsons/kfold_all_transforms.json | 29 +++ .../test_balancing_transformer.py | 88 ++++++++- .../test/integrative/make_test_datasets.py | 185 +++++++++++++----- 4 files changed, 252 insertions(+), 52 deletions(-) create mode 100644 atomsci/ddm/test/integrative/balancing_trans/jsons/kfold_all_transforms.json diff --git a/atomsci/ddm/pipeline/model_wrapper.py b/atomsci/ddm/pipeline/model_wrapper.py index f637940b..b4e4d4ab 100644 --- a/atomsci/ddm/pipeline/model_wrapper.py +++ b/atomsci/ddm/pipeline/model_wrapper.py @@ -977,7 +977,7 @@ def make_pred(x): self._copy_model(self.best_model_dir) retrain_time = time.time() - retrain_start self.log.info("Time to retrain model for %d epochs: %.1f seconds, %.1f sec/epoch" % (self.best_epoch, retrain_time, - retrain_time/self.best_epoch)) + retrain_time/max(1, self.best_epoch))) # **************************************************************************************** def train_with_early_stopping(self, 
pipeline): diff --git a/atomsci/ddm/test/integrative/balancing_trans/jsons/kfold_all_transforms.json b/atomsci/ddm/test/integrative/balancing_trans/jsons/kfold_all_transforms.json new file mode 100644 index 00000000..40b7c5a6 --- /dev/null +++ b/atomsci/ddm/test/integrative/balancing_trans/jsons/kfold_all_transforms.json @@ -0,0 +1,29 @@ +{ + "dataset_key" : "replaced", + "datastore" : "False", + "uncertainty": "False", + "splitter": "scaffold", + "split_valid_frac": "0.20", + "split_test_frac": "0.20", + "split_strategy": "train_valid_test", + "prediction_type": "classification", + "model_choice_score_type": "roc_auc", + "response_cols" : "active", + "id_col": "compound_id", + "smiles_col" : "rdkit_smiles", + "result_dir": "replaced", + "system": "LC", + "transformers": "True", + "model_type": "NN", + "featurizer": "computed_descriptors", + "descriptor_type": "rdkit_raw", + "weight_transform_type": "balancing", + "learning_rate": ".0007", + "layer_sizes": "20,10", + "dropouts": "0.3,0.3", + "save_results": "False", + "max_epochs": "2", + "early_stopping_patience": "2", + "verbose": "False", + "seed":"0" + } \ No newline at end of file diff --git a/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py b/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py index 31114379..e707ca34 100644 --- a/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py +++ b/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py @@ -78,9 +78,9 @@ def test_all_transformers(): valid_dset = model_pipeline.data.train_valid_dsets[0][1] trans_valid_dset = model_pipeline.model_wrapper.transform_dataset(valid_dset, fold=0) - # untransformed mean is 10 expected transformed mean is (10 - 0) / 2 + # untransformed validation mean is 10; expected transformed mean is (10 - 0) / 2 assert abs(np.mean(trans_valid_dset.X) - 5) < 1e-4 - # untransformed std is 5 expected transformed std is 5/2 + # untransformed validation std is 5; expected transformed std is 5/2 assert abs(np.std(trans_valid_dset.X) - (2.5)) < 1e-4 # validation has a 50/50 split. 
Majority class * 4 should equal oversampled minority class valid_weights = trans_valid_dset.w @@ -135,6 +135,90 @@ def params_w_balan(dset_key, res_dir): return params +def test_kfold_transformers(): + res_dir = tempfile.mkdtemp() + dskey = os.path.join(res_dir, 'special_test_dset.csv') + params = read_params( + make_relative_to_file('jsons/all_transforms.json'), + dskey, + res_dir + ) + num_folds = 3 + + make_test_datasets.make_kfold_dataset_and_split(dskey, + params['descriptor_type'], num_folds=num_folds) + + params['previously_featurized'] = True + params['previously_split'] = True + params['splitter'] = 'index' + params['split_uuid'] = 'testsplit' + params['split_strategy'] = 'k_fold_cv' + params['num_folds'] = num_folds + params['response_cols'] = ['class'] + + # check that the transformers are correct + model_pipeline = make_pipeline(params) + for f in range(num_folds): + assert(len(model_pipeline.model_wrapper.transformers[f])==0) + + assert(len(model_pipeline.model_wrapper.transformers_x[f])==1) + transformer_x = model_pipeline.model_wrapper.transformers_x[f][0] + assert(isinstance(transformer_x, trans.NormalizationTransformerMissingData)) + + assert(len(model_pipeline.model_wrapper.transformers_w[f])==1) + assert(isinstance(model_pipeline.model_wrapper.transformers_w[f][0], trans.BalancingTransformer)) + + assert(len(model_pipeline.data.train_valid_dsets)==num_folds) + for i, (train_dset, valid_dset) in enumerate(model_pipeline.data.train_valid_dsets): + # check that the transforms are correct + trans_train_dset = model_pipeline.model_wrapper.transform_dataset(train_dset, fold=i) + transformer_x = model_pipeline.model_wrapper.transformers_x[i][0] + + # the mean of each fold is the square of the fold value + fold_means = [f*f for f in range(num_folds)] + fold_means.remove(i*i) + expected_mean = sum(fold_means)/len(fold_means) + + # mean should be nearly 0 + assert abs(np.mean(trans_train_dset.X)) < 1e-4 + + # transformer means should be around expected_mean + np.testing.assert_array_almost_equal(transformer_x.X_means, np.ones_like(transformer_x.X_means)*expected_mean) + + # std should be nearly 1 + assert abs(np.std(trans_train_dset.X) - 1) < 1e-4 + + # there is an 80/20 imbalance in classification + weights = trans_train_dset.w + (weight1, weight2), (count1, count2) = np.unique(weights, return_counts=True) + assert (weight1*count1 - weight2*count2) < 1e-3 + + # validation set has a different distribution + trans_valid_dset = model_pipeline.model_wrapper.transform_dataset(valid_dset, fold=i) + + # validation mean should not be 0 + assert abs(np.mean(trans_valid_dset.X)) > 1e-4 + + # validation has a 80/20 split. 
Majority class * 4 should equal oversampled minority class + valid_weights = trans_valid_dset.w + (valid_weight1, valid_weight2), (valid_count1, valid_count2) = np.unique(valid_weights, return_counts=True) + assert (valid_weight1*valid_count1 - valid_weight2*valid_count2) < 1e-3 + + # test that the final transformer is correct + expected_mean = sum([f*f for f in range(num_folds)])/num_folds + trans_combined_dset = model_pipeline.model_wrapper.transform_dataset( + model_pipeline.data.combined_training_data(), fold='final') + # mean should be nearly 0 + assert abs(np.mean(trans_combined_dset.X)) < 1e-4 + + assert(len(model_pipeline.model_wrapper.transformers_x['final'])==1) + transformer_x = model_pipeline.model_wrapper.transformers_x['final'][0] + assert(isinstance(transformer_x, trans.NormalizationTransformerMissingData)) + # transformer means should be around expected_mean + np.testing.assert_array_almost_equal(transformer_x.X_means, np.ones_like(transformer_x.X_means)*expected_mean) + + if __name__ == '__main__': + test_kfold_transformers() test_all_transformers() test_balancing_transformer() \ No newline at end of file diff --git a/atomsci/ddm/test/integrative/make_test_datasets.py b/atomsci/ddm/test/integrative/make_test_datasets.py index 27cae79c..e6ed8764 100644 --- a/atomsci/ddm/test/integrative/make_test_datasets.py +++ b/atomsci/ddm/test/integrative/make_test_datasets.py @@ -48,7 +48,7 @@ def rescale(array, desired_mean, desired_std): return rescaled_array -def make_test_dataset(features, num_train=50000, num_test=10000, num_valid=10000): +def make_test_dataset(features, num_train=500, num_test=100, num_valid=100): """ Create a test dataset with specified features and splits. @@ -59,67 +59,80 @@ def make_test_dataset(features, num_train=50000, num_test=10000, num_valid=10000 num_valid (int): Number of validation samples. Returns: - pd.DataFrame: DataFrame with the specified features and an additional 'id' column. + pd.DataFrame: DataFrame with the specified features and additional columns. np.ndarray: IDs for the training split. np.ndarray: IDs for the test split. np.ndarray: IDs for the validation split. 
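+
+    Example (illustrative sketch, using the defaults num_train=500, num_test=100, num_valid=100):
+        df, train_ids, valid_ids, test_ids = make_test_dataset(['f1', 'f2'])
+        # -> 700 rows; train features are rescaled to mean 0/std 2, valid to 10/5 and
+        #    test to 100/10, so each subset's transformer statistics are easy to check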
""" - # Create an empty DataFrame - df = pd.DataFrame() + # Generate data for each subset + train_df = generate_subset_df(num_train, features, feature_mean=0, feature_std=2, + response1_mean=0, response1_std=1, + response2_mean=100, response2_std=1, + positive_frac=0.2) + valid_df = generate_subset_df(num_valid, features, feature_mean=10, feature_std=5, + response1_mean=0, response1_std=1, + response2_mean=100, response2_std=1, + positive_frac=0.5) + test_df = generate_subset_df(num_test, features, feature_mean=100, feature_std=10, + response1_mean=0, response1_std=1, + response2_mean=100, response2_std=1, + positive_frac=0.5) + + # Concatenate the dataframes + df = pd.concat([train_df, valid_df, test_df], ignore_index=True) + df['compound_id'] = list(range(len(df))) - # Generate IDs - total_samples = num_train + num_test + num_valid - compounds_ids = np.arange(total_samples) + # Split IDs + train_ids = df['compound_id'][:num_train].values + valid_ids = df['compound_id'][num_train:num_train + num_valid].values + test_ids = df['compound_id'][num_train + num_valid:].values - # Populate feature columns with numbers from different normal distributions - dat = {'compound_id':compounds_ids} + return df, train_ids, valid_ids, test_ids + +def generate_subset_df(fold_size, features, + feature_mean, feature_std, + response1_mean=0, response1_std=1, + response2_mean=100, response2_std=10, + positive_frac=0.8): + """ + Generate a fold of data with specified parameters. + + Parameters: + fold_size (int): Number of samples in the fold. + features (list): List of feature names. + feature_mean (float): Mean of the feature data. + feature_std (float): Standard deviation of the feature data. + response1_mean (float): Mean of the response_1 data. + response1_std (float): Standard deviation of the response_1 data. + response2_mean (float): Mean of the response_2 data. + response2_std (float): Standard deviation of the response_2 data. + positive_frac (float): Fraction of positive class labels. + + Returns: + pd.DataFrame: DataFrame containing the generated fold data. 
+ """ + dat = {} for feature in features: - train_data = np.random.normal(loc=0, scale=1, size=num_train) - train_data = rescale(train_data, 0, 2) - valid_data = np.random.normal(loc=0, scale=1, size=num_valid) - valid_data = rescale(valid_data, 10, 5) - test_data = np.random.normal(loc=0, scale=1, size=num_test) - test_data = rescale(test_data, 100, 10) - dat[feature] = np.concatenate([train_data, test_data, valid_data]) + fold_data = np.random.normal(loc=0, scale=1, size=fold_size) + fold_data = rescale(fold_data, feature_mean, feature_std) + dat[feature] = fold_data # Add response columns with different means - dat['response_1'] = np.random.normal(loc=0, scale=1, size=total_samples) - dat['response_2'] = np.random.normal(loc=100, scale=1, size=total_samples) + response_1 = np.random.normal(loc=0, scale=1, size=fold_size) + dat['response_1'] = rescale(response_1, response1_mean, response1_std) + response_2 = np.random.normal(loc=response2_mean, scale=response2_std, size=fold_size) + dat['response_2'] = rescale(response_2, response2_mean, response2_std) # Add class column - class_labels = np.zeros(total_samples) - dat['class'] = class_labels - - df = pd.DataFrame(data=dat) - # Ensure each subset has the same ratio of 0s and 1s in the class column - train_class = np.concatenate([ - np.zeros(int(num_train * 0.8)), - np.ones(int(num_train * 0.2)) - ]) - np.random.shuffle(train_class) - df.loc[:num_train-1, 'class'] = train_class - - valid_class = np.concatenate([ - np.zeros(int(num_valid * 0.5)), - np.ones(int(num_valid * 0.5)) - ]) - np.random.shuffle(valid_class) - df.loc[num_train:num_train+num_valid-1, 'class'] = valid_class - - test_class = np.concatenate([ - np.zeros(int(num_test * 0.5)), - np.ones(int(num_test * 0.5)) + class_labels = np.concatenate([ + np.zeros(int(fold_size * (1 - positive_frac))), + np.ones(int(fold_size * positive_frac)) ]) - np.random.shuffle(test_class) - df.loc[num_train+num_valid:, 'class'] = test_class - - # Split IDs - train_ids = df['compound_id'][:num_train].values - test_ids = df['compound_id'][num_train:num_train + num_test].values - valid_ids = df['compound_id'][num_train + num_test:].values + np.random.shuffle(class_labels) + dat['class'] = class_labels - return df, train_ids, valid_ids, test_ids + return pd.DataFrame(data=dat) def make_split_df(train_ids, valid_ids, test_ids): """ @@ -153,4 +166,78 @@ def make_test_dataset_and_split(dataset_key, feature_types): df.to_csv(dataset_key, index=False) split_filename = os.path.splitext(dataset_key)[0]+'_train_valid_test_index_testsplit.csv' - split_df.to_csv(split_filename, index=False) \ No newline at end of file + split_df.to_csv(split_filename, index=False) + +def make_kfold_test_dataset(features, fold_size=1000, num_test=1000, num_folds=5): + """ + Create a k-fold test dataset with specified features and splits. + + Parameters: + features (list): List of column names for the features. + fold_size (int): Number of samples in each fold. + num_test (int): Number of test samples. + num_folds (int): Number of folds. + + Returns: + pd.DataFrame: DataFrame with the specified features and additional columns. + list: List of lists containing IDs for each fold. + np.ndarray: IDs for the test split. 
+ """ + # Populate feature columns with numbers from different normal distributions + fold_dfs = [] + for f in range(num_folds): + fold_dfs.append(generate_subset_df(fold_size, features, + feature_mean=f*f, feature_std=f+1, + response1_mean=f*f, response1_std=f+1, + response2_mean=f*f*10, response2_std=(f+1)*10, + positive_frac=0.2)) + + # generate test_df + fold_dfs.append(generate_subset_df(num_test, features, + feature_mean=num_folds*num_folds, feature_std=num_folds+2, + response1_mean=num_folds*num_folds, response1_std=num_folds+2, + response2_mean=(num_folds*num_folds)*10, response2_std=(num_folds+2)*10, + positive_frac=0.5)) + + df = pd.concat(fold_dfs) + df['compound_id'] = list(range(len(df))) + + train_valid_ids = [] + for f in range(num_folds): + train_valid_ids.append(list(range(f*fold_size, (f+1)*fold_size))) + + test_ids = df['compound_id'].values[-num_test:] + + return df, train_valid_ids, test_ids + +def make_kfold_split_df(train_valid_ids, test_ids): + fold_dfs = [] + for i, tvi in enumerate(train_valid_ids): + data = {'cmpd_id':tvi} + data['subset'] = ['train_valid']*len(tvi) + data['fold'] = [i]*len(tvi) + + fold_dfs.append(pd.DataFrame(data=data)) + + data = {'cmpd_id':test_ids} + data['subset'] = ['test']*len(test_ids) + data['fold'] = [0]*len(test_ids) + + fold_dfs.append(pd.DataFrame(data=data)) + + split_df = pd.concat(fold_dfs) + + return split_df + +def make_kfold_dataset_and_split(dataset_key, feature_types, num_folds=3): + features = get_features(feature_types) + df, train_ids, test_ids = make_kfold_test_dataset(features, num_folds=num_folds) + + df['rdkit_smiles'] = ['CCCC'] * len(df) + split_df = make_kfold_split_df(train_ids, test_ids) + + df.to_csv(dataset_key, index=False) + + split_filename = os.path.splitext(dataset_key)[0]+f'_{num_folds}_fold_cv_index_testsplit.csv' + split_df.to_csv(split_filename, index=False) + From 3fb2f455857ed442c6bab89d0bc584b55fc4e778 Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Thu, 19 Dec 2024 10:11:11 -0800 Subject: [PATCH 29/30] Test for y transformers --- ...ms.json => all_transforms_regression.json} | 18 ++--- .../test_balancing_transformer.py | 81 ++++++++++++++++++- 2 files changed, 86 insertions(+), 13 deletions(-) rename atomsci/ddm/test/integrative/balancing_trans/jsons/{kfold_all_transforms.json => all_transforms_regression.json} (64%) diff --git a/atomsci/ddm/test/integrative/balancing_trans/jsons/kfold_all_transforms.json b/atomsci/ddm/test/integrative/balancing_trans/jsons/all_transforms_regression.json similarity index 64% rename from atomsci/ddm/test/integrative/balancing_trans/jsons/kfold_all_transforms.json rename to atomsci/ddm/test/integrative/balancing_trans/jsons/all_transforms_regression.json index 40b7c5a6..3dfc4671 100644 --- a/atomsci/ddm/test/integrative/balancing_trans/jsons/kfold_all_transforms.json +++ b/atomsci/ddm/test/integrative/balancing_trans/jsons/all_transforms_regression.json @@ -2,13 +2,12 @@ "dataset_key" : "replaced", "datastore" : "False", "uncertainty": "False", - "splitter": "scaffold", - "split_valid_frac": "0.20", - "split_test_frac": "0.20", - "split_strategy": "train_valid_test", - "prediction_type": "classification", - "model_choice_score_type": "roc_auc", - "response_cols" : "active", + "splitter": "index", + "previously_split": "True", + "split_strategy": "k_fold_cv", + "num_folds": "3", + "prediction_type": "regression", + "response_cols": ["response_1", "response_2"], "id_col": "compound_id", "smiles_col" : "rdkit_smiles", "result_dir": "replaced", @@ -17,7 +16,6 @@ 
"model_type": "NN", "featurizer": "computed_descriptors", "descriptor_type": "rdkit_raw", - "weight_transform_type": "balancing", "learning_rate": ".0007", "layer_sizes": "20,10", "dropouts": "0.3,0.3", @@ -25,5 +23,7 @@ "max_epochs": "2", "early_stopping_patience": "2", "verbose": "False", - "seed":"0" + "seed":"0", + "previously_featurized": "True", + "split_uuid": "testsplit" } \ No newline at end of file diff --git a/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py b/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py index e707ca34..7ddf0b2d 100644 --- a/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py +++ b/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py @@ -87,7 +87,6 @@ def test_all_transformers(): (valid_weight1, valid_weight2), (valid_count1, valid_count2) = np.unique(valid_weights, return_counts=True) assert (valid_weight1*valid_count1*4 - valid_weight2*valid_count2) < 1e-4 - def make_pipeline(params): pparams = parse.wrapper(params) model_pipeline = mp.ModelPipeline(pparams) @@ -217,8 +216,82 @@ def test_kfold_transformers(): # transformer means should be around expected_mean np.testing.assert_array_almost_equal(transformer_x.X_means, np.ones_like(transformer_x.X_means)*expected_mean) +def test_kfold_regression_transformers(): + res_dir = tempfile.mkdtemp() + dskey = os.path.join(res_dir, 'special_test_dset.csv') + params = read_params( + make_relative_to_file('jsons/all_transforms_regression.json'), + dskey, + res_dir + ) + num_folds = 3 + + make_test_datasets.make_kfold_dataset_and_split(dskey, + params['descriptor_type'], num_folds=num_folds) + + # check that the transformers are correct + model_pipeline = make_pipeline(params) + for f in range(num_folds): + assert(len(model_pipeline.model_wrapper.transformers[f])==1) + transformer = model_pipeline.model_wrapper.transformers[f][0] + assert(isinstance(transformer, trans.NormalizationTransformerMissingData)) + + + assert(len(model_pipeline.model_wrapper.transformers_x[f])==1) + transformer_x = model_pipeline.model_wrapper.transformers_x[f][0] + assert(isinstance(transformer_x, trans.NormalizationTransformerMissingData)) + + assert(len(model_pipeline.model_wrapper.transformers_w[f])==0) + + assert(len(model_pipeline.data.train_valid_dsets)==num_folds) + for i, (train_dset, valid_dset) in enumerate(model_pipeline.data.train_valid_dsets): + # check that the transforms are correct + trans_train_dset = model_pipeline.model_wrapper.transform_dataset(train_dset, fold=i) + transformer = model_pipeline.model_wrapper.transformers[i][0] + + # the mean of each fold is the square of the fold value + fold_means = [f*f for f in range(num_folds)] + fold_means.remove(i*i) + expected_mean_1 = sum(fold_means)/len(fold_means) + expected_mean_2 = expected_mean_1*10 + + # mean should be nearly 0 + assert abs(np.mean(trans_train_dset.y)) < 1e-4 + + # transformer means should be around expected_mean + assert(transformer.y_means.shape==(2,)) + expected_y_means = np.array([expected_mean_1, expected_mean_2]) + np.testing.assert_array_almost_equal(transformer.y_means, expected_y_means) + + # std should be nearly 1 + assert abs(np.std(trans_train_dset.y) - 1) < 1e-4 + + # validation set has a different distribution + trans_valid_dset = model_pipeline.model_wrapper.transform_dataset(valid_dset, fold=i) + + # validation mean should not be 0 + assert abs(np.mean(trans_valid_dset.y)) > 1e-4 + + # test that the final transformer is correct + expected_mean_1 = 
sum([f*f for f in range(num_folds)])/num_folds + expected_mean_2 = expected_mean_1*10 + expected_y_means = np.array([expected_mean_1, expected_mean_2]) + + trans_combined_dset = model_pipeline.model_wrapper.transform_dataset( + model_pipeline.data.combined_training_data(), fold='final') + + # mean should be nearly 0 + assert abs(np.mean(trans_combined_dset.y)) < 1e-4 + + assert(len(model_pipeline.model_wrapper.transformers['final'])==1) + transformer = model_pipeline.model_wrapper.transformers['final'][0] + assert(isinstance(transformer, trans.NormalizationTransformerMissingData)) + # transformer means should be around expected_mean + np.testing.assert_array_almost_equal(transformer.y_means, expected_y_means) + if __name__ == '__main__': - test_kfold_transformers() - test_all_transformers() - test_balancing_transformer() \ No newline at end of file + test_kfold_regression_transformers() + #test_kfold_transformers() + #test_all_transformers() + #test_balancing_transformer() \ No newline at end of file From d3097619f4dc09efd1457dfbac2e1008ffac5e71 Mon Sep 17 00:00:00 2001 From: "he6@llnl.gov" Date: Tue, 7 Jan 2025 14:39:52 -0800 Subject: [PATCH 30/30] Removed unused 'fold' parameter. Added documentation for this PR --- atomsci/ddm/pipeline/model_datasets.py | 28 ++++++-- atomsci/ddm/pipeline/model_wrapper.py | 51 +++++++++----- atomsci/ddm/pipeline/transformations.py | 25 ++++++- .../test_balancing_transformer.py | 43 +++++++++++- .../test/integrative/make_test_datasets.py | 37 ++++++++++ atomsci/ddm/test/unit/test_perf_data.py | 61 ++++++++++++++++ atomsci/ddm/test/unit/test_transformers.py | 70 +++++++++++++++++++ 7 files changed, 288 insertions(+), 27 deletions(-) diff --git a/atomsci/ddm/pipeline/model_datasets.py b/atomsci/ddm/pipeline/model_datasets.py index e89b45bb..8ebd7aa8 100644 --- a/atomsci/ddm/pipeline/model_datasets.py +++ b/atomsci/ddm/pipeline/model_datasets.py @@ -251,6 +251,19 @@ class ModelDataset(object): combined_train_valid_data (dc.Dataset): A dataset object (initialized as None), of the merged train and valid splits + combined_train_valid_data (dc.NumpyDataset): Cache for combined training and validation data, + used by k-fold CV code + + subset_response_dict (dictionary): Cache for subset-specific response values matched to IDs, + used by k-fold CV code + + subset_weight_dict (dictionary): Cache for subset-specific weights matched to IDs, + used by k-fold CV code + + untransformed_response_dict (dictionary): Cache for untransformed response values + matched to IDs, used by k-fold CV code + + set in get_featurized_data: dataset: A new featurized DeepChem Dataset. 
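The cached dictionaries documented above pair with one transformer set per fold plus a 'final' set fitted on the combined training and validation data. A minimal sketch of the resulting shapes (an illustration assuming a 3-fold k_fold_cv run; the key layout follows get_blank_transformations and get_transformer_keys as documented in this patch):

# Illustrative only: per-fold transformer dictionaries keyed by fold index plus 'final'.
# Each value is a (possibly empty) list of DeepChem transformers fitted on that fold's
# training data; the 'final' entry is fitted on the combined training+validation data.
transformers = {0: [], 1: [], 2: [], 'final': []}    # response (y) transformers
transformers_x = {0: [], 1: [], 2: [], 'final': []}  # feature (X) transformers
transformers_w = {0: [], 1: [], 2: [], 'final': []}  # weight (w) transformers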
@@ -316,7 +329,7 @@ def __init__(self, params, featurization): self.combined_train_valid_data = None # Cache for subset-specific response values matched to IDs, used by k-fold CV code self.subset_response_dict = {} - # Cache for subset-specific response values matched to IDs, used by k-fold CV code + # Cache for subset-specific weights matched to IDs, used by k-fold CV code self.subset_weight_dict = {} # Cache for untransformed response values matched to IDs, used by k-fold CV code self.untransformed_response_dict = {} @@ -355,6 +368,7 @@ def get_featurized_data(self, params=None): n_features: The count of features (int) vals: The response col after featurization (np.array) attr: A pd.dataframe containing the compound ids and smiles + untransformed_dataset: A NumpyDataset containing untransformed data @@ -692,8 +706,6 @@ def get_subset_responses_and_weights(self, subset): Args: subset (string): Label of subset, 'train', 'test', or 'valid' - transformers: Transformers object for full dataset - Returns: tuple(response_dict, weight_dict) (response_dict): dictionary mapping compound ids to arrays of per-task untransformed response values @@ -718,8 +730,16 @@ def get_subset_responses_and_weights(self, subset): # ************************************************************************************* def get_untransformed_responses(self, ids): - """ Returns a numpy array of untransformed response values """ + Returns a numpy array of untransformed response values for the given IDs. + + Parameters: + ids (list or np.ndarray): List or array of IDs for which to retrieve untransformed response values. + + Returns: + np.ndarray: A numpy array of untransformed response values corresponding to the given IDs. + """ + response_vals = np.zeros((len(ids), self.untransformed_dataset.y.shape[1])) if len(self.untransformed_response_dict) == 0: self.untransformed_response_dict = dict(zip(self.untransformed_dataset.ids, self.untransformed_dataset.y)) diff --git a/atomsci/ddm/pipeline/model_wrapper.py index b4e4d4ab..59e515b6 100644 --- a/atomsci/ddm/pipeline/model_wrapper.py +++ b/atomsci/ddm/pipeline/model_wrapper.py @@ -239,11 +239,16 @@ class ModelWrapper(object): output_dir (str): The parent path of the model directory - transformers (list): Initialized as an empty list, stores the transformers on the response cols + transformers (dict of lists): Initialized using transformers.get_blank_transformations. + Keyed using integer fold numbers or 'final' e.g., {0:[], 1:[], 'final':[]}. + Stores deepchem transformation objects on the response cols for each fold and uses the 'final' key for + the transformer fitted for the final model. When using k-fold validation, 'final' is fitted + using all training and validation data. Without k-fold validation, transformers for 0 and 'final' + are the same. - transformers_x (list): Initialized as an empty list, stores the transformers on the features + transformers_x (dict of lists): Same as transformers, but stores the transformers on the features - transformers_w (list): Initialized as an empty list, stores the transformers on the weights + transformers_w (dict of lists): Same as transformers, but stores the transformers on the weights set in setup_model_dirs: best_model_dir (str): The subdirectory under output_dir that contains the best model. 
Created in setup_model_dirs @@ -269,11 +274,17 @@ def __init__(self, params, featurizer, ds_client): output_dir (str): The parent path of the model directory - transformers (list): Initialized as an empty list, stores the transformers on the response cols + transformers (dict of lists): Initialized using transformers.get_blank_transformations. + Keyed using integer fold numbers or 'final' e.g., {0:[], 1:[], 'final':[]}. + Stores deepchem transformation objects on the response cols for each fold and uses the 'final' key for + the transformer fitted for the final model. When using k-fold validation, 'final' is fitted + using all training and validation data. Without k-fold validation, transformers for 0 and 'final' + are the same. - transformers_x (list): Initialized as an empty list, stores the transformers on the features + transformers_x (dict of lists): Same as transformers, but stores the transformers on the features + + transformers_w (dict of lists): Same as transformers, but stores the transformers on the weights - transformers_w (list): Initialized as an empty list, stores the transformers on the weights """ self.params = params @@ -328,7 +339,7 @@ def _create_output_transformers(self, dataset): """Initialize transformers for responses and persist them for later. Args: - model_dataset: The ModelDataset object that handles the current dataset + dataset: A dc.Dataset object Side effects: Overwrites the attributes: @@ -346,7 +357,7 @@ def _create_feature_transformers(self, dataset): """Initialize transformers for features, and persist them for later. Args: - model_dataset: The ModelDataset object that handles the current dataset + dataset: A dc.Dataset object Side effects: Overwrites the attributes: @@ -361,17 +372,21 @@ def create_transformers(self, training_datasets): """Initialize transformers for responses, features and weights, and persist them for later. Args: - training_datasets: The ModelDataset object that handles the current dataset + training_datasets: A dictionary of dc.Datasets containing the training data from + each fold. Generated using transformers.get_all_training_datasets. Side effects: Overwrites the attributes: - transformers: A list of deepchem transformation objects on responses, only if conditions are met - - transformers_x: A list of deepchem transformation objects on features, only if conditions are met. + transformers (dict of lists): Initialized using transformers.get_blank_transformations. + Keyed using integer fold numbers or 'final' e.g., {0:[], 1:[], 'final':[]}. + Stores deepchem transformation objects on the response cols for each fold and uses the 'final' key for + the transformer fitted for the final model. When using k-fold validation, 'final' is fitted + using all training and validation data. Without k-fold validation, transformers for 0 and 'final' + are the same. - transformers_w: A list of deepchem transformation objects on weights, only if conditions are met. + transformers_x (dict of lists): Same as transformers, but stores the transformers on the features - params.transformer_key: A string pointing to the dataset key containing the transformer in the datastore, or the path to the transformer + transformers_w (dict of lists): Same as transformers, but stores the transformers on the weights """ total_transformers = 0 @@ -459,7 +474,7 @@ def transform_dataset(self, dataset, fold): Args: dataset: The DeepChem DiskDataset that contains a dataset - fold (int): Which fold is being transformed. + fold (int/str): Which fold is being transformed. 
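+            For k-fold models this is the fold index used during training (e.g. 0);
+            passing 'final' applies the transformers fitted on the combined training and validation data.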
Returns: transformed_dataset: The transformed DeepChem DiskDataset @@ -511,7 +526,7 @@ def get_train_valid_pred_results(self, perf_data): return perf_data.get_prediction_results() # **************************************************************************************** - def get_test_perf_data(self, model_dir, model_dataset, fold): + def get_test_perf_data(self, model_dir, model_dataset): """Returns the predicted values and metrics for the current test dataset against the version of the model stored in model_dir, as a PerfData object. @@ -553,7 +568,7 @@ def get_test_pred_results(self, model_dir, model_dataset): return perf_data.get_prediction_results() # **************************************************************************************** - def get_full_dataset_perf_data(self, model_dataset, fold): + def get_full_dataset_perf_data(self, model_dataset): """Returns the predicted values and metrics from the current model for the full current dataset, as a PerfData object. @@ -1617,7 +1632,7 @@ def _create_output_transformers(self, dataset): """Initialize transformers for responses and persist them for later. Args: - model_dataset: The ModelDataset object that handles the current dataset + dataset: The dc.Dataset object that contains the current training dataset Side effects: Overwrites the attributes: diff --git a/atomsci/ddm/pipeline/transformations.py index fd8bedaa..7f42f003 100644 --- a/atomsci/ddm/pipeline/transformations.py +++ b/atomsci/ddm/pipeline/transformations.py @@ -70,9 +70,12 @@ def create_feature_transformers(params, featurization, train_dset): DeepChem transformer object holding its parameters. Args: - params (argparse.namespace: Object containing the parameter list + params (argparse.namespace): Object containing the parameter list + + featurization (featurization.Featurization): A Featurization object that will be used with + the train_dset object. - model_dataset (ModelDataset): Contains the dataset to be transformed. + train_dset (dc.Dataset): Contains the dataset used to fit the transformers. Returns: (list of DeepChem transformer objects): list of transformers for the feature matrix @@ -102,7 +105,7 @@ def create_weight_transformers(params, dataset): Args: params (argparse.namespace: Object containing the parameter list - model_dataset (ModelDataset): Contains the dataset to be transformed. + dataset (dc.Dataset): Contains the dataset to be transformed. Returns: (list of DeepChem transformer objects): list of transformers for the weight matrix @@ -146,6 +149,12 @@ def get_transformer_keys(params): There is one set of transformers for each fold and then one transformer for both validation and training sets. AMPL automatically trains a model using all validation and training data at the end of the training loop. + + Args: + params (argparse.namespace): Object containing the parameter list + + Returns: + (list): A list of all keys used in transformer dictionaries. """ if params.split_strategy != 'k_fold_cv': return [0, 'final'] @@ -156,6 +165,9 @@ def get_blank_transformations(): """Get empty transformations dictionary These keys must always exist, even when there are no transformations + + Returns: + (dict): A dictionary containing empty lists. Used when no transformers are needed """ return {0:[], 'final':[]} @@ -165,6 +177,13 @@ def get_all_training_datasets(model_dataset): This takes a model_dataset and returns a dictionary of all datasets that will need a transformer. The keys will match what is returned by get_transformer_keys + + Args: + model_dataset: A model_datasets.ModelDataset object containing the current dataset. + + Returns: + dict of dc.Datasets: A dictionary keyed by fold numbers and 'final'. Contains 
The keys will match what is returned by get_transformer_keys + + Args: + model_dataset: A model_datasets.ModelDataset object containing the current dataset. + + Returns: + dict of dc.Datasets: A dictionary keyed using keys fold numbers and 'final'. Contains + the training data for each fold and the final training+validation training set. """ result = {} if model_dataset.splitting is None: diff --git a/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py b/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py index 7ddf0b2d..f09369e7 100644 --- a/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py +++ b/atomsci/ddm/test/integrative/balancing_trans/test_balancing_transformer.py @@ -16,6 +16,9 @@ logger = logging.getLogger(__name__) def test_balancing_transformer(): + """ + Test the balancing transformer to ensure that it correctly adjusts weights for imbalanced datasets. + """ dset_key = make_relative_to_file('../../test_datasets/MRP3_dataset.csv') res_dir = tempfile.mkdtemp() @@ -32,6 +35,10 @@ def test_balancing_transformer(): assert weight == 1 def test_all_transformers(): + """ + Test all transformers to ensure they work correctly with the dataset. + + """ res_dir = tempfile.mkdtemp() dskey = os.path.join(res_dir, 'special_test_dset.csv') params = read_params( @@ -88,6 +95,9 @@ def test_all_transformers(): assert (valid_weight1*valid_count1*4 - valid_weight2*valid_count2) < 1e-4 def make_pipeline(params): + """ + Generates a pipeline given parameters + """ pparams = parse.wrapper(params) model_pipeline = mp.ModelPipeline(pparams) model_pipeline.train_model() @@ -95,6 +105,9 @@ def make_pipeline(params): return model_pipeline def make_pipeline_and_get_weights(params): + """ + Generates the pipeline and gets the weights given parameters + """ model_pipeline = make_pipeline(params) model_wrapper = model_pipeline.model_wrapper train_dataset = model_pipeline.data.train_valid_dsets[0][0] @@ -103,12 +116,27 @@ def make_pipeline_and_get_weights(params): return transformed_data.w def make_relative_to_file(relative_path): + """ + Generates the full path relative to the location of this file. + """ + script_path = os.path.dirname(os.path.realpath(__file__)) result = os.path.join(script_path, relative_path) return result def read_params(json_file, tmp_dskey, res_dir): + """ + Read parameters from a JSON file and update them with the dataset key and result directory. + + Parameters: + json_file (str): Path to the JSON file containing parameters. + tmp_dskey (str): Temporary dataset key. + res_dir (str): Result directory. + + Returns: + dict: Updated parameters. + """ with open(json_file, 'r') as file: params = json.load(file) params['result_dir'] = res_dir @@ -116,7 +144,9 @@ def read_params(json_file, tmp_dskey, res_dir): return params def params_wo_balan(dset_key, res_dir): - # Train classification models without balancing weights. Repeat this several times so we can get some statistics on the performance metrics. 
+    """
+    Reads params for models without balancing weights.
+    """
     params = read_params(
         make_relative_to_file('jsons/wo_balancing_transformer.json'),
         dset_key,
@@ -125,7 +155,9 @@
     return params
 
 def params_w_balan(dset_key, res_dir):
-    # Now train models on the same dataset with balancing weights
+    """
+    Reads params for models with balancing weights.
+    """
     params = read_params(
         make_relative_to_file('jsons/balancing_transformer.json'),
         dset_key,
 
     return params
 
 def test_kfold_transformers():
+    """
+    Test transformers for a k-fold classification model.
+    """
     res_dir = tempfile.mkdtemp()
     dskey = os.path.join(res_dir, 'special_test_dset.csv')
     params = read_params(
@@ -217,6 +252,10 @@
     np.testing.assert_array_almost_equal(transformer_x.X_means, np.ones_like(transformer_x.X_means)*expected_mean)
 
 def test_kfold_regression_transformers():
+    """
+    Tests transformers for each fold of a k-fold regression model, ensuring
+    that the transformers are correct for each fold.
+    """
     res_dir = tempfile.mkdtemp()
     dskey = os.path.join(res_dir, 'special_test_dset.csv')
     params = read_params(
diff --git a/atomsci/ddm/test/integrative/make_test_datasets.py b/atomsci/ddm/test/integrative/make_test_datasets.py
index e6ed8764..2b7767db 100644
--- a/atomsci/ddm/test/integrative/make_test_datasets.py
+++ b/atomsci/ddm/test/integrative/make_test_datasets.py
@@ -19,6 +19,9 @@ def get_absolute_path(relative_path):
     return absolute_path
 
 def get_features(feature_type):
+    """
+    Gets the feature columns given a feature_type, e.g. rdkit_raw.
+    """
     desc_spec_file = get_absolute_path('../../data/descriptor_sets_sources_by_descr_type.csv')
     desc_spec_df = pd.read_csv(desc_spec_file, index_col=False)
 
@@ -157,6 +160,17 @@ def make_split_df(train_ids, valid_ids, test_ids):
     return split_df
 
 def make_test_dataset_and_split(dataset_key, feature_types):
+    """
+    Given a dataset key and a feature type, create a featurized
+    CSV file and split.
+
+    Args:
+        dataset_key (str): Where to save the newly generated test DataFrame
+
+        feature_types (str): A feature type, e.g. rdkit_raw
+
+
+    """
     features = get_features(feature_types)
 
     df, train_ids, valid_ids, test_ids = make_test_dataset(features)
@@ -211,6 +225,17 @@ def make_kfold_test_dataset(features, fold_size=1000, num_test=1000, num_folds=5):
     return df, train_valid_ids, test_ids
 
 def make_kfold_split_df(train_valid_ids, test_ids):
+    """
+    Given lists of train_valid_ids and test_ids, create a split DataFrame.
+
+    Args:
+        train_valid_ids (list of lists): A list of lists of compound ids, one list per fold
+
+        test_ids (list): Ids for test compounds
+
+    Returns:
+        DataFrame: A split DataFrame that can be read by AMPL
+    """
     fold_dfs = []
     for i, tvi in enumerate(train_valid_ids):
         data = {'cmpd_id':tvi}
@@ -230,6 +255,18 @@
     return split_df
 
 def make_kfold_dataset_and_split(dataset_key, feature_types, num_folds=3):
+    """
+    Given a dataset key, a feature type, and a number of folds, create a
+    featurized CSV file and split.
+
+    Args:
+        dataset_key (str): Where to save the newly generated test DataFrame
+
+        feature_types (str): A feature type, e.g. rdkit_raw
+
+        num_folds (int): Number of folds
+
+    """
     features = get_features(feature_types)
 
     df, train_ids, test_ids = make_kfold_test_dataset(features, num_folds=num_folds)
diff --git a/atomsci/ddm/test/unit/test_perf_data.py b/atomsci/ddm/test/unit/test_perf_data.py
index 20275fe2..f5752abc 100644
--- a/atomsci/ddm/test/unit/test_perf_data.py
+++ b/atomsci/ddm/test/unit/test_perf_data.py
@@ -10,10 +10,28 @@
 import json
 
 def copy_to_temp(dskey, res_dir):
+    """
+    Copy a dataset to a temporary directory.
+
+    Args:
+        dskey (str): Path to the original dataset.
+        res_dir (str): Path to the temporary directory.
+
+    Returns:
+        str: Path to the copied dataset in the temporary directory.
+    """
     new_dskey = shutil.copy(dskey, res_dir)
     return new_dskey
 
 def setup_paths():
+    """
+    Set up the paths for the test, including creating a temporary directory and copying the dataset to it.
+
+    Returns:
+        tuple: A tuple containing:
+            - res_dir (str): Path to the temporary result directory.
+            - tmp_dskey (str): Path to the copied dataset in the temporary directory.
+    """
     script_path = os.path.dirname(os.path.realpath(__file__))
     res_dir = tempfile.mkdtemp()
     dskey = os.path.join(script_path, '../test_datasets/aurka_chembl_base_smiles_union.csv')
@@ -22,6 +40,18 @@ def setup_paths():
     return res_dir, tmp_dskey
 
 def read_params(json_file, res_dir, tmp_dskey):
+    """
+    Read parameters from a JSON file and update them with the result directory and dataset key.
+
+    Args:
+        json_file (str): Path to the JSON file containing parameters.
+        res_dir (str): Path to the result directory.
+        tmp_dskey (str): Path to the copied dataset in the temporary directory.
+
+    Returns:
+        dict: Updated parameters.
+    """
+
     with open(json_file, 'r') as file:
         params = json.load(file)
     params['result_dir'] = res_dir
@@ -29,12 +59,25 @@ def read_params(json_file, res_dir, tmp_dskey):
     return params
 
 def make_relative_to_file(relative_path):
+    """
+    Generates the full path relative to the location of this file.
+
+    Args:
+        relative_path (str): The relative path to convert.
+
+    Returns:
+        str: The absolute path corresponding to the relative path.
+    """
     script_path = os.path.dirname(os.path.realpath(__file__))
     result = os.path.join(script_path, relative_path)
 
     return result
 
 def test_KFoldRegressionPerfData():
+    """
+    Test the KFoldRegressionPerfData class to ensure it correctly handles
+    k-fold regression performance data.
+    """
     res_dir, tmp_dskey = setup_paths()
 
     params = read_params(make_relative_to_file('config_perf_data_KFoldRegressoinPerfData.json'),
@@ -78,6 +121,9 @@ def test_KFoldRegressionPerfData():
     assert r2_std==0
 
 def test_KFoldRegressionPerfDataMulti():
+    """
+    Test the KFoldRegressionPerfData class on a multitask regression dataset.
+    """
     res_dir, tmp_dskey = setup_paths()
 
     # duplicate pIC50 column
@@ -126,6 +172,10 @@ def test_KFoldRegressionPerfDataMulti():
     assert r2_std==0
 
 def test_KFoldClassificationPerfData():
+    """
+    Test the KFoldClassificationPerfData class to ensure it correctly
+    handles k-fold classification performance data.
+    """
     res_dir, tmp_dskey = setup_paths()
 
     params = read_params(
@@ -175,6 +225,10 @@ def test_KFoldClassificationPerfData():
     assert roc_auc_std==0
 
 def test_SimpleRegressionPerfData():
+    """
+    Test the SimpleRegressionPerfData class for correct performance data
+    creation and metrics computation.
+    """
     res_dir, tmp_dskey = setup_paths()
 
     params = read_params(
@@ -216,6 +270,13 @@ def test_SimpleRegressionPerfData():
     assert r2_mean == 1
 
 def test_SimpleClassificationPerfData():
+    """
+    Test function for SimpleClassificationPerfData.
+
+    This function sets up a model pipeline, trains a model, and creates performance data
+    for a simple classification task. It then verifies the resulting predictions and
+    performance metrics.
+    """
     res_dir, tmp_dskey = setup_paths()
 
     params = read_params(
diff --git a/atomsci/ddm/test/unit/test_transformers.py b/atomsci/ddm/test/unit/test_transformers.py
index 46a3fbd3..69cfebd6 100644
--- a/atomsci/ddm/test/unit/test_transformers.py
+++ b/atomsci/ddm/test/unit/test_transformers.py
@@ -4,6 +4,18 @@
 
 
 def test_no_missing_values():
+    """
+    Test the `get_statistics_missing_ydata` function from the `trans` module
+    to ensure it correctly calculates the mean and standard deviation of the
+    y-values when there are no missing values in the dataset.
+
+    The test creates a dataset with no missing y-values and checks that the
+    calculated means and standard deviations match the expected values.
+
+    Assertions:
+    - The means of the y-values should be [3.0, 4.0].
+    - The standard deviations of the y-values should be approximately [1.632993, 1.632993].
+    """
     y = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
     w = np.array([[1, 1], [1, 1], [1, 1]])
     x = np.ones_like(y)
@@ -14,6 +26,20 @@
     np.testing.assert_array_almost_equal(y_stds, [1.632993, 1.632993])
 
 def test_some_missing_values():
+    """
+    Test the handling of missing values in the dataset.
+
+    This test creates a dataset with some missing values in the target variable `y`
+    and verifies that the `get_statistics_missing_ydata` function correctly computes
+    the means and standard deviations of the non-missing values.
+
+    Missing entries in `y` are marked by zeros in the weight matrix `w`,
+    mirroring how AMPL encodes missing response values.
+
+    Assertions:
+    - The means of the non-missing values in `y` should be approximately [3.0, 5.0].
+    - The standard deviations of the non-missing values in `y` should be approximately [1.632993, 1.0].
+    """
     y = np.array([[1.0, np.nan], [3.0, 4.0], [5.0, 6.0]])
     w = np.array([[1, 0], [1, 1], [1, 1]])
     x = np.ones_like(y)
@@ -24,6 +50,16 @@
     np.testing.assert_array_almost_equal(y_stds, [1.632993, 1.0])
 
 def test_all_missing_values():
+    """
+    Test the `get_statistics_missing_ydata` function with a dataset where all y-values are missing (NaN).
+
+    This test creates a dataset with all missing y-values and checks that the function correctly computes
+    the means and standard deviations of the y-values, which should both be arrays of zeros.
+
+    The test asserts that:
+    - The means of the y-values are [0.0, 0.0].
+    - The standard deviations of the y-values are [0.0, 0.0].
+    """
     y = np.array([[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]])
     w = np.array([[0, 0], [0, 0], [0, 0]])
     x = np.ones_like(y)
@@ -34,6 +70,18 @@
     np.testing.assert_array_almost_equal(y_stds, [0.0, 0.0])
 
 def test_one_task_no_missing_values():
+    """
+    Test the `get_statistics_missing_ydata` function with a dataset that has no missing values.
+
+    This test creates a single-task dataset with no missing values and checks that the mean
+    and standard deviation of the y-values are calculated correctly.
+
+    The expected mean of y-values is [3.0] and the expected standard deviation is [1.632993].
+
+    Asserts:
+    - The calculated mean of y-values is almost equal to [3.0].
+    - The calculated standard deviation of y-values is almost equal to [1.632993].
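+
+    A minimal sketch of the call under test (assuming the DeepChem
+    NumpyDataset wrapper used by these tests):
+
+        dataset = NumpyDataset(X=x, y=y, w=w)
+        y_means, y_stds = trans.get_statistics_missing_ydata(dataset)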
+ """ y = np.array([[1.0], [3.0], [5.0]]) w = np.array([[1], [1], [1]]) x = np.ones_like(y) @@ -44,6 +92,16 @@ def test_one_task_no_missing_values(): np.testing.assert_array_almost_equal(y_stds, [1.632993]) def test_normalization_transformer_missing_data(): + """ + Test the NormalizationTransformerMissingData class for handling missing data in the target variable. + + The expected means and standard deviations for `y` are: + - Means: [3.0, 5.0] + - Standard deviations: [1.632993, 1.0] + + The expected transformed `y` values are: + - [[-1.224745, 0], [0.0, -1.0], [1.224745, 1.0]] + """ # Create a mock dataset X = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]) y = np.array([[1.0, np.nan], [3.0, 4.0], [5.0, 6.0]]) @@ -69,6 +127,18 @@ def test_normalization_transformer_missing_data(): np.testing.assert_array_almost_equal(transformed_dataset.y, expected_transformed_y, decimal=6) def test_normalization_transformer_missing_data_transform_X(): + """ + Test the NormalizationTransformerMissingData with transform_X=True. + + This test verifies the following: + 1. The means and standard deviations of the features in the dataset are correctly computed. + 2. The transformation is correctly applied to the dataset. + + Assertions: + - The computed means of the features should be [3.0, 4.0]. + - The computed standard deviations of the features should be approximately [1.632993, 1.632993]. + - The transformed feature values should match the expected transformed values with a precision of 6 decimal places. + """ # Create a mock dataset X = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]) y = np.array([[1.0], [3.0], [5.0]])
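+    # A sketch of the expected standardization (assuming the transformer
+    # applies z = (x - mean) / std per feature column): column means are
+    # [3.0, 4.0], stds are ~1.632993, so column 0 maps [1, 3, 5] to
+    # [-1.224745, 0.0, 1.224745].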