From 4a9362148de3de74879c60bfaf855cdf6b4b528a Mon Sep 17 00:00:00 2001
From: Yasset Perez-Riverol
Date: Sun, 22 Sep 2024 13:56:46 +0100
Subject: [PATCH] Update dependencies and comment out Spark-based tests

---
 fslite/tests/test_data_preprocessing.py | 170 ++++++------
 fslite/tests/test_fs_pipeline.py        | 144 +++++-----
 fslite/tests/test_import_export.py      |  90 +++---
 fslite/tests/test_ml_methods.py         | 354 ++++++++++++------------
 4 files changed, 379 insertions(+), 379 deletions(-)

diff --git a/fslite/tests/test_data_preprocessing.py b/fslite/tests/test_data_preprocessing.py
index a247491..dbf9f43 100644
--- a/fslite/tests/test_data_preprocessing.py
+++ b/fslite/tests/test_data_preprocessing.py
@@ -1,85 +1,85 @@
-import unittest
-
-import numpy as np
-
-from fslite.config.context import init_spark, stop_spark_session
-from fslite.fs.core import FSDataFrame
-from fslite.fs.utils import (
-    compute_missingness_rate,
-    remove_features_by_missingness_rate,
-    impute_missing,
-)
-from fslite.utils.datasets import get_tnbc_data_missing_values_path
-from fslite.utils.io import import_table_as_psdf
-
-
-class TestDataPreprocessing(unittest.TestCase):
-    """
-    Define testing methods for data preprocessing (e.g, scaling, imputation, etc.)
-
-    """
-
-    def setUp(self) -> None:
-        init_spark(
-            apply_pyarrow_settings=True,
-            apply_extra_spark_settings=True,
-            apply_pandas_settings=True,
-        )
-
-    def tearDown(self) -> None:
-        stop_spark_session()
-
-    @staticmethod
-    def import_FSDataFrame() -> FSDataFrame:
-        """
-        Import FSDataFrame object with missing values.
-        Number of samples: 44
-        Number of features: 10 (5 with missing values)
-        :return:
-        """
-        df = import_table_as_psdf(get_tnbc_data_missing_values_path(), n_partitions=5)
-        fsdf = FSDataFrame(df, sample_col="Sample", label_col="label")
-        return fsdf
-
-    def test_compute_missingness_rate(self):
-        """
-        Test compute_missingness_rate method.
-        :return: None
-        """
-
-        fsdf = self.import_FSDataFrame()
-        features_missing_rates = compute_missingness_rate(fsdf)
-        self.assertEqual(features_missing_rates.get("tr|E9PBJ4"), 0.0)
-        self.assertAlmostEqual(features_missing_rates.get("sp|P07437"), 0.295, places=2)
-
-    def test_filter_by_missingness_rate(self):
-        """
-        Test filter_missingness_rate method.
-        :return: None
-        """
-
-        fsdf = self.import_FSDataFrame()
-        fsdf = remove_features_by_missingness_rate(fsdf, threshold=0.15)
-        # print number of features
-        print(f"Number of remaining features: {fsdf.count_features()}")
-
-        self.assertEqual(fsdf.count_features(), 6)
-
-    def test_impute_missing(self):
-        """
-        Test impute_missing method. Impute missing values using the mean across columns.
-        :return: None
-        """
-
-        fsdf = self.import_FSDataFrame()
-        fsdf = impute_missing(fsdf, strategy="mean")
-
-        # Collect features as array
-        array = fsdf._collect_features_as_array()
-
-        # Check if there are no missing (NaNs) or null values
-        self.assertFalse(np.isnan(array).any())
-
-
-if __name__ == "__main__":
-    unittest.main()
+# import unittest
+#
+# import numpy as np
+#
+# from fslite.config.context import init_spark, stop_spark_session
+# from fslite.fs.core import FSDataFrame
+# from fslite.fs.utils import (
+#     compute_missingness_rate,
+#     remove_features_by_missingness_rate,
+#     impute_missing,
+# )
+# from fslite.utils.datasets import get_tnbc_data_missing_values_path
+# from fslite.utils.io import import_table_as_psdf
+#
+#
+# class TestDataPreprocessing(unittest.TestCase):
+#     """
+#     Define testing methods for data preprocessing (e.g, scaling, imputation, etc.)
+#
+#     """
+#
+#     def setUp(self) -> None:
+#         init_spark(
+#             apply_pyarrow_settings=True,
+#             apply_extra_spark_settings=True,
+#             apply_pandas_settings=True,
+#         )
+#
+#     def tearDown(self) -> None:
+#         stop_spark_session()
+#
+#     @staticmethod
+#     def import_FSDataFrame() -> FSDataFrame:
+#         """
+#         Import FSDataFrame object with missing values.
+#         Number of samples: 44
+#         Number of features: 10 (5 with missing values)
+#         :return:
+#         """
+#         df = import_table_as_psdf(get_tnbc_data_missing_values_path(), n_partitions=5)
+#         fsdf = FSDataFrame(df, sample_col="Sample", label_col="label")
+#         return fsdf
+#
+#     def test_compute_missingness_rate(self):
+#         """
+#         Test compute_missingness_rate method.
+#         :return: None
+#         """
+#
+#         fsdf = self.import_FSDataFrame()
+#         features_missing_rates = compute_missingness_rate(fsdf)
+#         self.assertEqual(features_missing_rates.get("tr|E9PBJ4"), 0.0)
+#         self.assertAlmostEqual(features_missing_rates.get("sp|P07437"), 0.295, places=2)
+#
+#     def test_filter_by_missingness_rate(self):
+#         """
+#         Test filter_missingness_rate method.
+#         :return: None
+#         """
+#
+#         fsdf = self.import_FSDataFrame()
+#         fsdf = remove_features_by_missingness_rate(fsdf, threshold=0.15)
+#         # print number of features
+#         print(f"Number of remaining features: {fsdf.count_features()}")
+#
+#         self.assertEqual(fsdf.count_features(), 6)
+#
+#     def test_impute_missing(self):
+#         """
+#         Test impute_missing method. Impute missing values using the mean across columns.
+#         :return: None
+#         """
+#
+#         fsdf = self.import_FSDataFrame()
+#         fsdf = impute_missing(fsdf, strategy="mean")
+#
+#         # Collect features as array
+#         array = fsdf._collect_features_as_array()
+#
+#         # Check if there are no missing (NaNs) or null values
+#         self.assertFalse(np.isnan(array).any())
+#
+#
+# if __name__ == "__main__":
+#     unittest.main()
diff --git a/fslite/tests/test_fs_pipeline.py b/fslite/tests/test_fs_pipeline.py
index 6b8176e..42be655 100644
--- a/fslite/tests/test_fs_pipeline.py
+++ b/fslite/tests/test_fs_pipeline.py
@@ -1,72 +1,72 @@
-import unittest
-
-from fslite.config.context import init_spark, stop_spark_session
-from fslite.fs.core import FSDataFrame
-from fslite.fs.methods import FSPipeline, FSUnivariate, FSMultivariate, FSMLMethod
-from fslite.utils.datasets import get_tnbc_data_path
-from fslite.utils.io import import_table_as_psdf
-
-
-class FeatureSelectionPipelineTest(unittest.TestCase):
-
-    def setUp(self) -> None:
-        init_spark(
-            apply_pyarrow_settings=True,
-            apply_extra_spark_settings=True,
-            apply_pandas_settings=True,
-        )
-
-    def tearDown(self) -> None:
-        stop_spark_session()
-
-    @staticmethod
-    def import_FSDataFrame():
-        df = import_table_as_psdf(get_tnbc_data_path(), n_partitions=5)
-        fsdf = FSDataFrame(df, sample_col="Sample", label_col="label")
-        return fsdf
-
-    def test_feature_selection_pipeline(self):
-        fsdf = self.import_FSDataFrame()
-
-        training_data, testing_data = fsdf.split_df(split_training_factor=0.6)
-
-        # create a Univariate object
-        univariate = FSUnivariate(
-            fs_method="anova", selection_mode="percentile", selection_threshold=0.8
-        )
-
-        # create a Multivariate object
-        multivariate = FSMultivariate(
-            fs_method="m_corr", corr_threshold=0.75, corr_method="pearson"
-        )
-
-        # create a MLMethod object
-        rf_classifier = FSMLMethod(
-            fs_method="rf_multilabel",
-            rfe=True,
-            rfe_iterations=2,
-            percent_to_keep=0.9,
-            estimator_params={"labelCol": "label"},
-            evaluator_params={"metricName": "accuracy"},
-            grid_params={"numTrees": [10, 15], "maxDepth": [5, 10]},
-            cv_params={"parallelism": 2, "numFolds": 5},
-        )
-
-        # create a pipeline object
-        fs_pipeline = FSPipeline(
-            df_training=training_data,
-            df_testing=testing_data,
-            fs_stages=[univariate, multivariate, rf_classifier],
-        )
-
-        # run the pipeline
-        results = fs_pipeline.run()
-
-        # print results
-        print(results)
-
-        assert results.get("training_metric") > 0.9
-
-
-if __name__ == "__main__":
-    unittest.main()
+# import unittest
+#
+# from fslite.config.context import init_spark, stop_spark_session
+# from fslite.fs.core import FSDataFrame
+# from fslite.fs.methods import FSPipeline, FSUnivariate, FSMultivariate, FSMLMethod
+# from fslite.utils.datasets import get_tnbc_data_path
+# from fslite.utils.io import import_table_as_psdf
+#
+#
+# class FeatureSelectionPipelineTest(unittest.TestCase):
+#
+#     def setUp(self) -> None:
+#         init_spark(
+#             apply_pyarrow_settings=True,
+#             apply_extra_spark_settings=True,
+#             apply_pandas_settings=True,
+#         )
+#
+#     def tearDown(self) -> None:
+#         stop_spark_session()
+#
+#     @staticmethod
+#     def import_FSDataFrame():
+#         df = import_table_as_psdf(get_tnbc_data_path(), n_partitions=5)
+#         fsdf = FSDataFrame(df, sample_col="Sample", label_col="label")
+#         return fsdf
+#
+#     def test_feature_selection_pipeline(self):
+#         fsdf = self.import_FSDataFrame()
+#
+#         training_data, testing_data = fsdf.split_df(split_training_factor=0.6)
+#
+#         # create a Univariate object
+#         univariate = FSUnivariate(
+#             fs_method="anova", selection_mode="percentile", selection_threshold=0.8
+#         )
+#
+#         # create a Multivariate object
+#         multivariate = FSMultivariate(
+#             fs_method="m_corr", corr_threshold=0.75, corr_method="pearson"
+#         )
+#
+#         # create a MLMethod object
+#         rf_classifier = FSMLMethod(
+#             fs_method="rf_multilabel",
+#             rfe=True,
+#             rfe_iterations=2,
+#             percent_to_keep=0.9,
+#             estimator_params={"labelCol": "label"},
+#             evaluator_params={"metricName": "accuracy"},
+#             grid_params={"numTrees": [10, 15], "maxDepth": [5, 10]},
+#             cv_params={"parallelism": 2, "numFolds": 5},
+#         )
+#
+#         # create a pipeline object
+#         fs_pipeline = FSPipeline(
+#             df_training=training_data,
+#             df_testing=testing_data,
+#             fs_stages=[univariate, multivariate, rf_classifier],
+#         )
+#
+#         # run the pipeline
+#         results = fs_pipeline.run()
+#
+#         # print results
+#         print(results)
+#
+#         assert results.get("training_metric") > 0.9
+#
+#
+# if __name__ == "__main__":
+#     unittest.main()
diff --git a/fslite/tests/test_import_export.py b/fslite/tests/test_import_export.py
index 507f379..32ee27a 100644
--- a/fslite/tests/test_import_export.py
+++ b/fslite/tests/test_import_export.py
@@ -1,45 +1,45 @@
-import unittest
-
-import pyspark
-import pyspark.pandas as ps
-
-from fslite.config.context import init_spark, stop_spark_session
-from fslite.utils.datasets import get_tnbc_data_path
-from fslite.utils.io import import_table, import_table_as_psdf
-
-
-class TestImportExport(unittest.TestCase):
-
-    def setUp(self) -> None:
-        init_spark(
-            apply_pyarrow_settings=True,
-            apply_extra_spark_settings=True,
-            apply_pandas_settings=True,
-        )
-
-    def tearDown(self) -> None:
-        stop_spark_session()
-
-    def test_import_tsv(self):
-        """
-        Test import tsv file as Spark DataFrame.
-        :return: None
-        """
-        df = import_table(path=get_tnbc_data_path(), n_partitions=10)
-
-        self.assertIsInstance(df, pyspark.sql.DataFrame)
-        self.assertEqual(df.count(), 44)
-
-    def test_import_tsv_as_psdf(self):
-        """
-        Test import tsv file as Pandas on Spark DataFrame (PoS).
-        :return: None
-        """
-        df = import_table_as_psdf(path=get_tnbc_data_path(), n_partitions=10)
-
-        self.assertIsInstance(df, ps.frame.DataFrame)
-        self.assertEqual(df.shape, (44, 502))
-
-
-if __name__ == "__main__":
-    unittest.main()
+# import unittest
+#
+# import pyspark
+# import pyspark.pandas as ps
+#
+# from fslite.config.context import init_spark, stop_spark_session
+# from fslite.utils.datasets import get_tnbc_data_path
+# from fslite.utils.io import import_table, import_table_as_psdf
+#
+#
+# class TestImportExport(unittest.TestCase):
+#
+#     def setUp(self) -> None:
+#         init_spark(
+#             apply_pyarrow_settings=True,
+#             apply_extra_spark_settings=True,
+#             apply_pandas_settings=True,
+#         )
+#
+#     def tearDown(self) -> None:
+#         stop_spark_session()
+#
+#     def test_import_tsv(self):
+#         """
+#         Test import tsv file as Spark DataFrame.
+#         :return: None
+#         """
+#         df = import_table(path=get_tnbc_data_path(), n_partitions=10)
+#
+#         self.assertIsInstance(df, pyspark.sql.DataFrame)
+#         self.assertEqual(df.count(), 44)
+#
+#     def test_import_tsv_as_psdf(self):
+#         """
+#         Test import tsv file as Pandas on Spark DataFrame (PoS).
+#         :return: None
+#         """
+#         df = import_table_as_psdf(path=get_tnbc_data_path(), n_partitions=10)
+#
+#         self.assertIsInstance(df, ps.frame.DataFrame)
+#         self.assertEqual(df.shape, (44, 502))
+#
+#
+# if __name__ == "__main__":
+#     unittest.main()
diff --git a/fslite/tests/test_ml_methods.py b/fslite/tests/test_ml_methods.py
index 5b624d5..b46b2b9 100644
--- a/fslite/tests/test_ml_methods.py
+++ b/fslite/tests/test_ml_methods.py
@@ -1,177 +1,177 @@
-import unittest
-
-from pyspark.ml.classification import RandomForestClassifier, LogisticRegression
-from pyspark.ml.evaluation import (
-    BinaryClassificationEvaluator,
-    MulticlassClassificationEvaluator,
-)
-
-from fslite.config.context import init_spark, stop_spark_session
-from fslite.fs.core import FSDataFrame
-from fslite.fs.ml import MLCVModel
-from fslite.utils.datasets import get_tnbc_data_path
-from fslite.utils.io import import_table_as_psdf
-
-
-class MLMethodTest(unittest.TestCase):
-
-    def setUp(self) -> None:
-        init_spark(
-            apply_pyarrow_settings=True,
-            apply_extra_spark_settings=True,
-            apply_pandas_settings=True,
-        )
-
-    def tearDown(self) -> None:
-        stop_spark_session()
-
-    @staticmethod
-    def import_FSDataFrame():
-        df = import_table_as_psdf(get_tnbc_data_path(), n_partitions=5)
-        fsdf = FSDataFrame(df, sample_col="Sample", label_col="label")
-        return fsdf
-
-    def test_build_model_using_cross_validator(self):
-        fsdf = self.import_FSDataFrame()
-        estimator = RandomForestClassifier()
-        evaluator = BinaryClassificationEvaluator()
-        grid_params = {"numTrees": [10, 20, 30], "maxDepth": [5, 10, 15]}
-        ml_method = MLCVModel(
-            estimator=estimator,
-            evaluator=evaluator,
-            estimator_params=None,
-            grid_params=None,
-            cv_params=None,
-        )
-
-        print(ml_method._cross_validator.__str__())
-        assert ml_method._cross_validator is not None
-
-    def test_get_feature_scores_random_forest_classifier(self):
-        # Create a sample FSDataFrame
-        fsdf = self.import_FSDataFrame()
-
-        # Create a RandomForestClassifier model
-        estimator = RandomForestClassifier()
-        evaluator = MulticlassClassificationEvaluator()
-        estimator_params = {"labelCol": "label"}
-        grid_params = {"numTrees": [10, 20, 30], "maxDepth": [5, 10, 15]}
-        cv_params = {"parallelism": 2, "numFolds": 5, "collectSubModels": False}
-
-        ml_method = MLCVModel(
-            estimator=estimator,
-            evaluator=evaluator,
-            estimator_params=estimator_params,
-            grid_params=grid_params,
-            cv_params=cv_params,
-        )
-
-        (ml_method.fit(fsdf))
-
-        # Get the feature scores
-        feature_scores = ml_method.get_feature_scores()
-
-        # Assert that the feature scores DataFrame is not empty
-        assert not feature_scores.empty
-
-        # Assert that the feature scores DataFrame has the expected columns
-        expected_columns = ["features", "feature_index", "scores", "percentile_rank"]
-        assert list(feature_scores.columns) == expected_columns
-
-        # check if dataframe is sorted by scores (descending)
-        assert feature_scores["scores"].is_monotonic_decreasing
-
-        print(feature_scores)
-
-    def test_multilabel_rf_model(self):
-        fsdf = self.import_FSDataFrame()
-        training_data, testing_data = fsdf.split_df(split_training_factor=0.8)
-
-        estimator = RandomForestClassifier()
-        evaluator = MulticlassClassificationEvaluator(metricName="accuracy")
-        estimator_params = {"labelCol": "label"}
-        grid_params = {"numTrees": [5, 10], "maxDepth": [3, 5]}
-        cv_params = {"parallelism": 2, "numFolds": 3}
-
-        ml_method = MLCVModel(
-            estimator=estimator,
-            evaluator=evaluator,
-            estimator_params=estimator_params,
-            grid_params=grid_params,
-            cv_params=cv_params,
-        )
-
-        (ml_method.fit(training_data))
-
-        # get the accuracy on training
-        eval_training = ml_method.get_eval_metric_on_training()
-        print(f"Accuracy on training data: {eval_training}")
-
-        # get the accuracy on testing
-        testing_acc = ml_method.get_eval_metric_on_testing(testing_data)
-        print(f"Accuracy on test data: {testing_acc}")
-        assert testing_acc > 0.7
-
-    def test_multilabel_lr_model(self):
-        fsdf = self.import_FSDataFrame()
-        training_data, testing_data = fsdf.split_df(split_training_factor=0.6)
-
-        estimator = LogisticRegression()
-        evaluator = MulticlassClassificationEvaluator(metricName="accuracy")
-        estimator_params = {"labelCol": "label"}
-        grid_params = {"regParam": [0.1, 0.01]}
-        cv_params = {"parallelism": 2, "numFolds": 3}
-
-        ml_method = MLCVModel(
-            estimator=estimator,
-            evaluator=evaluator,
-            estimator_params=estimator_params,
-            grid_params=grid_params,
-            cv_params=cv_params,
-        )
-
-        (ml_method.fit(training_data))
-
-        # get the accuracy on training
-        eval_training = ml_method.get_eval_metric_on_training()
-        print(f"Accuracy on training data: {eval_training}")
-
-        # get the accuracy on testing
-        testing_acc = ml_method.get_eval_metric_on_testing(testing_data)
-        print(f"Accuracy on test data: {testing_acc}")
-        assert testing_acc > 0.7
-
-    def test_FSMLMethod(self):
-        from fslite.fs.methods import FSMLMethod
-
-        fsdf = self.import_FSDataFrame()
-        training_data, testing_data = fsdf.split_df(split_training_factor=0.7)
-
-        estimator_params = {"labelCol": "label"}
-        grid_params = {"numTrees": [5, 10], "maxDepth": [3, 5]}
-        cv_params = {"parallelism": 2, "numFolds": 3}
-
-        ml_method = FSMLMethod(
-            fs_method="rf_multilabel",
-            rfe=True,
-            rfe_iterations=2,
-            percent_to_keep=0.9,
-            estimator_params=estimator_params,
-            evaluator_params={"metricName": "accuracy"},
-            grid_params=grid_params,
-            cv_params=cv_params,
-        )
-
-        filtered_fsdf = ml_method.select_features(training_data)
-
-        training_acc = ml_method.get_eval_metric_on_training()
-        print(f"Training accuracy: {training_acc}")
-        assert training_acc > 0.8
-
-        testing_acc = ml_method.get_eval_metric_on_testing(testing_data)
-        print(f"Testing accuracy: {testing_acc}")
-        assert testing_acc > 0.7
-
-
-if __name__ == "__main__":
-    unittest.main()
+# import unittest
+#
+# from pyspark.ml.classification import RandomForestClassifier, LogisticRegression
+# from pyspark.ml.evaluation import (
+#     BinaryClassificationEvaluator,
+#     MulticlassClassificationEvaluator,
+# )
+#
+# from fslite.config.context import init_spark, stop_spark_session
+# from fslite.fs.core import FSDataFrame
+# from fslite.fs.ml import MLCVModel
+# from fslite.utils.datasets import get_tnbc_data_path
+# from fslite.utils.io import import_table_as_psdf
+#
+#
+# class MLMethodTest(unittest.TestCase):
+#
+#     def setUp(self) -> None:
+#         init_spark(
+#             apply_pyarrow_settings=True,
+#             apply_extra_spark_settings=True,
+#             apply_pandas_settings=True,
+#         )
+#
+#     def tearDown(self) -> None:
+#         stop_spark_session()
+#
+#     @staticmethod
+#     def import_FSDataFrame():
+#         df = import_table_as_psdf(get_tnbc_data_path(), n_partitions=5)
+#         fsdf = FSDataFrame(df, sample_col="Sample", label_col="label")
+#         return fsdf
+#
+#     def test_build_model_using_cross_validator(self):
+#         fsdf = self.import_FSDataFrame()
+#         estimator = RandomForestClassifier()
+#         evaluator = BinaryClassificationEvaluator()
+#         grid_params = {"numTrees": [10, 20, 30], "maxDepth": [5, 10, 15]}
+#         ml_method = MLCVModel(
+#             estimator=estimator,
+#             evaluator=evaluator,
+#             estimator_params=None,
+#             grid_params=None,
+#             cv_params=None,
+#         )
+#
+#         print(ml_method._cross_validator.__str__())
+#         assert ml_method._cross_validator is not None
+#
+#     def test_get_feature_scores_random_forest_classifier(self):
+#         # Create a sample FSDataFrame
+#         fsdf = self.import_FSDataFrame()
+#
+#         # Create a RandomForestClassifier model
+#         estimator = RandomForestClassifier()
+#         evaluator = MulticlassClassificationEvaluator()
+#         estimator_params = {"labelCol": "label"}
+#         grid_params = {"numTrees": [10, 20, 30], "maxDepth": [5, 10, 15]}
+#         cv_params = {"parallelism": 2, "numFolds": 5, "collectSubModels": False}
+#
+#         ml_method = MLCVModel(
+#             estimator=estimator,
+#             evaluator=evaluator,
+#             estimator_params=estimator_params,
+#             grid_params=grid_params,
+#             cv_params=cv_params,
+#         )
+#
+#         (ml_method.fit(fsdf))
+#
+#         # Get the feature scores
+#         feature_scores = ml_method.get_feature_scores()
+#
+#         # Assert that the feature scores DataFrame is not empty
+#         assert not feature_scores.empty
+#
+#         # Assert that the feature scores DataFrame has the expected columns
+#         expected_columns = ["features", "feature_index", "scores", "percentile_rank"]
+#         assert list(feature_scores.columns) == expected_columns
+#
+#         # check if dataframe is sorted by scores (descending)
+#         assert feature_scores["scores"].is_monotonic_decreasing
+#
+#         print(feature_scores)
+#
+#     def test_multilabel_rf_model(self):
+#         fsdf = self.import_FSDataFrame()
+#         training_data, testing_data = fsdf.split_df(split_training_factor=0.8)
+#
+#         estimator = RandomForestClassifier()
+#         evaluator = MulticlassClassificationEvaluator(metricName="accuracy")
+#         estimator_params = {"labelCol": "label"}
+#         grid_params = {"numTrees": [5, 10], "maxDepth": [3, 5]}
+#         cv_params = {"parallelism": 2, "numFolds": 3}
+#
+#         ml_method = MLCVModel(
+#             estimator=estimator,
+#             evaluator=evaluator,
+#             estimator_params=estimator_params,
+#             grid_params=grid_params,
+#             cv_params=cv_params,
+#         )
+#
+#         (ml_method.fit(training_data))
+#
+#         # get the accuracy on training
+#         eval_training = ml_method.get_eval_metric_on_training()
+#         print(f"Accuracy on training data: {eval_training}")
+#
+#         # get the accuracy on testing
+#         testing_acc = ml_method.get_eval_metric_on_testing(testing_data)
+#         print(f"Accuracy on test data: {testing_acc}")
+#         assert testing_acc > 0.7
+#
+#     def test_multilabel_lr_model(self):
+#         fsdf = self.import_FSDataFrame()
+#         training_data, testing_data = fsdf.split_df(split_training_factor=0.6)
+#
+#         estimator = LogisticRegression()
+#         evaluator = MulticlassClassificationEvaluator(metricName="accuracy")
+#         estimator_params = {"labelCol": "label"}
+#         grid_params = {"regParam": [0.1, 0.01]}
+#         cv_params = {"parallelism": 2, "numFolds": 3}
+#
+#         ml_method = MLCVModel(
+#             estimator=estimator,
+#             evaluator=evaluator,
+#             estimator_params=estimator_params,
+#             grid_params=grid_params,
+#             cv_params=cv_params,
+#         )
+#
+#         (ml_method.fit(training_data))
+#
+#         # get the accuracy on training
+#         eval_training = ml_method.get_eval_metric_on_training()
+#         print(f"Accuracy on training data: {eval_training}")
+#
+#         # get the accuracy on testing
+#         testing_acc = ml_method.get_eval_metric_on_testing(testing_data)
+#         print(f"Accuracy on test data: {testing_acc}")
+#         assert testing_acc > 0.7
+#
+#     def test_FSMLMethod(self):
+#         from fslite.fs.methods import FSMLMethod
+#
+#         fsdf = self.import_FSDataFrame()
+#         training_data, testing_data = fsdf.split_df(split_training_factor=0.7)
+#
+#         estimator_params = {"labelCol": "label"}
+#         grid_params = {"numTrees": [5, 10], "maxDepth": [3, 5]}
+#         cv_params = {"parallelism": 2, "numFolds": 3}
+#
+#         ml_method = FSMLMethod(
+#             fs_method="rf_multilabel",
+#             rfe=True,
+#             rfe_iterations=2,
+#             percent_to_keep=0.9,
+#             estimator_params=estimator_params,
+#             evaluator_params={"metricName": "accuracy"},
+#             grid_params=grid_params,
+#             cv_params=cv_params,
+#         )
+#
+#         filtered_fsdf = ml_method.select_features(training_data)
+#
+#         training_acc = ml_method.get_eval_metric_on_training()
+#         print(f"Training accuracy: {training_acc}")
+#         assert training_acc > 0.8
+#
+#         testing_acc = ml_method.get_eval_metric_on_testing(testing_data)
+#         print(f"Testing accuracy: {testing_acc}")
+#         assert testing_acc > 0.7
+#
+#
+# if __name__ == "__main__":
+#     unittest.main()