Skip to content

Commit

Permalink
Fix bug in tests: update assertions for `perf.statistic` now returning a scalar value instead of a dict keyed by model name
Browse files Browse the repository at this point in the history
  • Loading branch information
mgraffg authored Feb 20, 2025
1 parent 7da3f06 commit 361ef43
Show file tree
Hide file tree
Showing 2 changed files with 30 additions and 56 deletions.
1 change: 1 addition & 0 deletions CompStats/tests/test_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -144,6 +144,7 @@ def test_Perf_statistic():
X_train, X_val, y_train, y_val = _
ens = RandomForestClassifier().fit(X_train, y_train)
perf = Perf(y_val, forest=ens.predict(X_val), num_samples=50)
perf(ens.predict(X_val))
assert 'forest' in perf.statistic


Expand Down
85 changes: 29 additions & 56 deletions CompStats/tests/test_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,9 +29,9 @@ def test_f1_score():
hy = ens.predict(X_val)
perf = f1_score(y_val, forest=hy,
num_samples=50, average='macro')
assert 'forest' in perf.statistic
assert isinstance(perf.statistic, float)
_ = metrics.f1_score(y_val, hy, average='macro')
assert _ == perf.statistic['forest']
assert _ == perf.statistic
perf = f1_score(y_val, hy, average=None)
assert str(perf) is not None
nb = GaussianNB().fit(X_train, y_train)
Expand All @@ -50,9 +50,8 @@ def test_accuracy_score():
hy = ens.predict(X_val)
perf = accuracy_score(y_val, forest=hy,
num_samples=50)
assert 'forest' in perf.statistic
_ = metrics.accuracy_score(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_balanced_accuracy_score():
Expand All @@ -66,9 +65,8 @@ def test_balanced_accuracy_score():
hy = ens.predict(X_val)
perf = balanced_accuracy_score(y_val, forest=hy,
num_samples=50)
assert 'forest' in perf.statistic
_ = metrics.balanced_accuracy_score(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_top_k_accuracy_score():
Expand All @@ -83,9 +81,8 @@ def test_top_k_accuracy_score():
perf = top_k_accuracy_score(y_val,
forest=hy,
num_samples=50)
assert 'forest' in perf.statistic
_ = metrics.top_k_accuracy_score(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_average_precision_score():
Expand All @@ -100,9 +97,8 @@ def test_average_precision_score():
perf = average_precision_score(y_val,
forest=hy,
num_samples=50)
assert 'forest' in perf.statistic
_ = metrics.average_precision_score(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_brier_score_loss():
Expand All @@ -118,11 +114,8 @@ def test_brier_score_loss():
perf = brier_score_loss(np.where(y_val == 0, 1, 0),
forest=hy,
num_samples=50)
nb = GaussianNB().fit(X_train, y_train)
perf(nb.predict_proba(X_val)[:, 0])
assert 'forest' in perf.statistic
_ = metrics.brier_score_loss(np.where(y_val == 0, 1, 0), hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_log_loss():
Expand All @@ -138,11 +131,8 @@ def test_log_loss():
perf = log_loss(y_val,
forest=hy,
num_samples=50)
nb = GaussianNB().fit(X_train, y_train)
perf(nb.predict_proba(X_val))
assert 'forest' in perf.statistic
_ = metrics.log_loss(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_precision_score():
Expand All @@ -158,9 +148,8 @@ def test_precision_score():
perf = precision_score(y_val,
forest=hy,
num_samples=50, average='macro')
assert 'forest' in perf.statistic
_ = metrics.precision_score(y_val, hy, average='macro')
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_recall_score():
Expand All @@ -176,9 +165,8 @@ def test_recall_score():
perf = recall_score(y_val,
forest=hy,
num_samples=50, average='macro')
assert 'forest' in perf.statistic
_ = metrics.recall_score(y_val, hy, average='macro')
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_jaccard_score():
Expand All @@ -194,9 +182,8 @@ def test_jaccard_score():
perf = jaccard_score(y_val,
forest=hy,
num_samples=50, average='macro')
assert 'forest' in perf.statistic
_ = metrics.jaccard_score(y_val, hy, average='macro')
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_roc_auc_score():
Expand All @@ -212,10 +199,9 @@ def test_roc_auc_score():
perf = roc_auc_score(y_val,
forest=hy, multi_class='ovr',
num_samples=50, average='macro')
assert 'forest' in perf.statistic
_ = metrics.roc_auc_score(y_val, hy, multi_class='ovr',
average='macro')
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_d2_log_loss_score():
Expand All @@ -231,9 +217,8 @@ def test_d2_log_loss_score():
perf = d2_log_loss_score(y_val,
forest=hy,
num_samples=50)
assert 'forest' in perf.statistic
_ = metrics.d2_log_loss_score(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_explained_variance_score():
Expand All @@ -248,9 +233,8 @@ def test_explained_variance_score():
perf = explained_variance_score(y_val,
forest=hy,
num_samples=50)
assert 'forest' in perf.statistic
_ = metrics.explained_variance_score(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_max_error():
Expand All @@ -265,9 +249,8 @@ def test_max_error():
perf = max_error(y_val,
forest=hy,
num_samples=50)
assert 'forest' in perf.statistic
_ = metrics.max_error(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_mean_absolute_error():
Expand All @@ -282,9 +265,8 @@ def test_mean_absolute_error():
perf = mean_absolute_error(y_val,
forest=hy,
num_samples=50)
assert 'forest' in perf.statistic
_ = metrics.mean_absolute_error(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_mean_squared_error():
Expand All @@ -299,9 +281,8 @@ def test_mean_squared_error():
perf = mean_squared_error(y_val,
forest=hy,
num_samples=50)
assert 'forest' in perf.statistic
_ = metrics.mean_squared_error(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_root_mean_squared_error():
Expand All @@ -316,9 +297,8 @@ def test_root_mean_squared_error():
perf = root_mean_squared_error(y_val,
forest=hy,
num_samples=50)
assert 'forest' in perf.statistic
_ = metrics.root_mean_squared_error(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_mean_squared_log_error():
Expand All @@ -333,9 +313,8 @@ def test_mean_squared_log_error():
perf = mean_squared_log_error(y_val,
forest=hy,
num_samples=50)
assert 'forest' in perf.statistic
_ = metrics.mean_squared_log_error(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_root_mean_squared_log_error():
Expand All @@ -350,9 +329,8 @@ def test_root_mean_squared_log_error():
perf = root_mean_squared_log_error(y_val,
forest=hy,
num_samples=50)
assert 'forest' in perf.statistic
_ = metrics.root_mean_squared_log_error(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_median_absolute_error():
Expand All @@ -367,9 +345,8 @@ def test_median_absolute_error():
perf = median_absolute_error(y_val,
forest=hy,
num_samples=50)
assert 'forest' in perf.statistic
_ = metrics.median_absolute_error(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_r2_score():
Expand All @@ -382,11 +359,11 @@ def test_r2_score():
ens = RandomForestRegressor().fit(X_train, y_train)
hy = ens.predict(X_val)
perf = r2_score(y_val,
forest=hy,
num_samples=50)
assert 'forest' in perf.statistic
forest=hy,
num_samples=50)

_ = metrics.r2_score(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_mean_poisson_deviance():
Expand All @@ -401,9 +378,8 @@ def test_mean_poisson_deviance():
perf = mean_poisson_deviance(y_val,
forest=hy,
num_samples=50)
assert 'forest' in perf.statistic
_ = metrics.mean_poisson_deviance(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_mean_gamma_deviance():
Expand All @@ -418,9 +394,8 @@ def test_mean_gamma_deviance():
perf = mean_gamma_deviance(y_val,
forest=hy,
num_samples=50)
assert 'forest' in perf.statistic
_ = metrics.mean_gamma_deviance(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_mean_absolute_percentage_error():
Expand All @@ -435,9 +410,8 @@ def test_mean_absolute_percentage_error():
perf = mean_absolute_percentage_error(y_val,
forest=hy,
num_samples=50)
assert 'forest' in perf.statistic
_ = metrics.mean_absolute_percentage_error(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic


def test_d2_absolute_error_score():
Expand All @@ -452,6 +426,5 @@ def test_d2_absolute_error_score():
perf = d2_absolute_error_score(y_val,
forest=hy,
num_samples=50)
assert 'forest' in perf.statistic
_ = metrics.d2_absolute_error_score(y_val, hy)
assert _ == perf.statistic['forest']
assert _ == perf.statistic

0 comments on commit 361ef43

Please sign in to comment.