
Commit

improved tests
FilippoAiraldi committed Jul 31, 2024
1 parent d1f9d0a commit 90cd747
Showing 10 changed files with 235 additions and 39 deletions.
1 change: 1 addition & 0 deletions .github/workflows/ci.yml
@@ -12,6 +12,7 @@ jobs:
fail-fast: false
matrix:
python-version: ["3.9", "3.10", "3.11"]
RUNNING_TESTS: [0, 1]

steps:
- uses: actions/checkout@v4
9 changes: 9 additions & 0 deletions src/globopt/regression.py
@@ -9,6 +9,7 @@
"""


import os
from typing import Any, Optional, Union

import torch
@@ -19,6 +20,8 @@
from torch import Tensor
from torch.nn import Module

RUNNING_TESTS = int(os.environ.get("RUNNING_TESTS", "0"))

DELTA = 1e-12
"""Small value to avoid division by zero."""

@@ -27,6 +30,8 @@ def trace(*args: Any, **kwargs: Any):
"""Applies `torch.jit.trace` to the decorated function."""

def _inner_decorator(func):
if RUNNING_TESTS:
return func
return torch.jit.trace(func, *args, **kwargs)

return _inner_decorator
@@ -36,6 +41,8 @@ def script(*args: Any, **kwargs: Any):
"""Applies `torch.jit.script` to the decorated function."""

def _inner_decorator(func):
if RUNNING_TESTS:
return func
return torch.jit.script(func, *args, **kwargs)

return _inner_decorator
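
For context, a minimal self-contained sketch of the JIT-gating pattern introduced above: when RUNNING_TESTS=1 the decorators return the plain Python function, so tests exercise code that coverage tools and debuggers can see; otherwise the function is compiled as before. The `scale` function is a hypothetical usage example, not part of globopt.

import os

import torch

RUNNING_TESTS = int(os.environ.get("RUNNING_TESTS", "0"))


def script(*args, **kwargs):
    """Applies `torch.jit.script` to the decorated function, unless tests
    are running, in which case the plain Python function is returned."""

    def _inner_decorator(func):
        if RUNNING_TESTS:
            return func  # skip compilation: keeps tracebacks/coverage intact
        return torch.jit.script(func, *args, **kwargs)

    return _inner_decorator


@script()
def scale(x: torch.Tensor, a: float) -> torch.Tensor:  # hypothetical example
    return a * x


# plain `function` under RUNNING_TESTS=1, a `ScriptFunction` otherwise
print(type(scale))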
@@ -141,6 +148,7 @@ def _rbf_partial_fit(
Minv_new_shape = X.shape[:-2] + (X.shape[-2], X.shape[-2])
Minv_new = torch.empty(Minv_new_shape, device=Minv.device, dtype=Minv.dtype)

# block inversion
Cinv = torch.linalg.inv(C[mask, :, :])
L = L[mask, :, :]
B = -L @ Cinv
@@ -151,6 +159,7 @@
(torch.cat((A, B), -1), torch.cat((B.mT, Cinv), -1)), -2
)

# full inversion
not_mask = ~mask
X_old_nm = X[..., :n, :][not_mask, :, :]
_, M = _cdist_and_inverse_quadratic_kernel(X_old_nm, X_old_nm, eps)
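The `# block inversion` comment added above labels the branch that updates the inverse kernel matrix incrementally when new points are appended, instead of re-inverting from scratch. A sketch of the underlying identity (Schur-complement block inversion of a symmetric matrix) with hypothetical names, verified against a full inverse; this mirrors the idea, not globopt's exact code:

import torch

torch.manual_seed(0)
n, m = 5, 2  # hypothetical: n old points, m newly appended points
K = torch.randn(n + m, n + m, dtype=torch.float64)
M = K @ K.mT + torch.eye(n + m, dtype=torch.float64)  # symmetric pos. def.

A, B, D = M[:n, :n], M[:n, n:], M[n:, n:]
Ainv = torch.linalg.inv(A)  # assumed already known from the previous fit

# Schur complement of A in M yields the four blocks of M^{-1}
Sinv = torch.linalg.inv(D - B.mT @ Ainv @ B)
top_left = Ainv + Ainv @ B @ Sinv @ B.mT @ Ainv
top_right = -Ainv @ B @ Sinv
Minv = torch.cat(
    (torch.cat((top_left, top_right), -1), torch.cat((top_right.mT, Sinv), -1)),
    -2,
)

torch.testing.assert_close(Minv, torch.linalg.inv(M))  # matches full inversion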
Binary file removed tests/data_test_core.mat
Binary file modified tests/data_test_myopic.pkl
Binary file added tests/data_test_regression.pkl
62 changes: 46 additions & 16 deletions tests/test_myopic.py
@@ -3,39 +3,69 @@

import torch

from globopt.myopic_acquisitions import (
from globopt import (
GaussHermiteSampler,
IdwAcquisitionFunction,
_idw_distance,
idw_acquisition_function,
Rbf,
qIdwAcquisitionFunction,
)
from globopt.myopic_acquisitions import _idw_distance, idw_acquisition_function
from globopt.problems import SimpleProblem
from globopt.regression import Rbf

with open(r"tests/data_test_myopic.pkl", "rb") as f:
RESULTS = pickle.load(f)


class TestAcquisitionFunction(unittest.TestCase):
def test__methods__returns_correct_values(self):
def test_IdwAcquisitionFunction__returns_correct_values(self):
problem = SimpleProblem()
X = torch.as_tensor([-2.61, -1.92, -0.63, 0.38, 2], device="cpu").unsqueeze(-1)
Y = problem(X)

mdl = Rbf(X, Y, 0.5)
x = torch.linspace(-3, 3, 1000, dtype=X.dtype).view(1, -1, 1)
x = torch.linspace(-3, 3, RESULTS["N"], dtype=X.dtype).view(1, -1, 1)
MAF = IdwAcquisitionFunction(mdl, 1.0, 0.5)
a1 = MAF(x.transpose(1, 0)).squeeze().neg()

y_hat, s, W_sum_recipr, _ = mdl(x)
        # compute the acquisition function with N batches of 1x1 inputs
acqfun1 = MAF(x.transpose(1, 0))

# compute the same acquisition function with a single Nx1 input
y_hat, scale, W_sum_recipr, _ = mdl(x)
dym = Y.amax(-2) - Y.amin(-2)
z = _idw_distance(W_sum_recipr)
a2 = idw_acquisition_function(y_hat, s, dym, W_sum_recipr, MAF.c1, MAF.c2).neg()

out = torch.stack((s.squeeze(), z.squeeze(), a2.squeeze()))
out_expected = torch.as_tensor(RESULTS["acquisitions"], dtype=a1.dtype)
torch.testing.assert_close(a1, a2.squeeze(), rtol=1e-5, atol=1e-3)
torch.testing.assert_close(a1, out_expected[-1], rtol=1e-5, atol=1e-3)
torch.testing.assert_close(out, out_expected, rtol=1e-5, atol=1e-3)
dist = _idw_distance(W_sum_recipr)
acqfun2 = idw_acquisition_function(
y_hat, scale, dym, W_sum_recipr, MAF.c1, MAF.c2
)

torch.testing.assert_close(
acqfun1.flatten(), acqfun2.flatten(), rtol=1e-4, atol=1e-4
)
torch.testing.assert_close(
acqfun1.flatten(),
torch.as_tensor(RESULTS["acquisition"], dtype=acqfun1.dtype),
)
torch.testing.assert_close(
scale.flatten(), torch.as_tensor(RESULTS["scale"], dtype=scale.dtype)
)
torch.testing.assert_close(
dist.flatten(), torch.as_tensor(RESULTS["distance"], dtype=dist.dtype)
)

def test_qIdwAcquisitionFunction__returns_correct_values(self):
problem = SimpleProblem()
X = torch.as_tensor([-2.61, -1.92, -0.63, 0.38, 2], device="cpu").unsqueeze(-1)
Y = problem(X)

mdl = Rbf(X, Y, 0.5)
x = torch.linspace(-3, 3, RESULTS["N"], dtype=X.dtype).view(1, -1, 1)
sampler = GaussHermiteSampler(torch.Size([RESULTS["gh_samples"]]))
MAF = qIdwAcquisitionFunction(mdl, 1.0, 0.5, sampler)
acqfun = MAF(x.transpose(1, 0)).squeeze().neg()

torch.testing.assert_close(
acqfun.flatten(),
torch.as_tensor(RESULTS["qacquisition"], dtype=acqfun.dtype),
)


if __name__ == "__main__":
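For reference, `_idw_distance` in the test above computes the exploration term of the IDW acquisition from the reciprocal of the summed IDW weights. A hedged sketch of the standard formulation it presumably follows, z(x) = (2/pi) * arctan(1 / sum_i w_i(x)) with weights w_i(x) = 1 / ||x - x_i||^2, per the GLIS family of methods; this is an assumption for illustration, not necessarily globopt's exact code:

import math

import torch


def idw_distance_sketch(x: torch.Tensor, X_train: torch.Tensor) -> torch.Tensor:
    """z(x) = (2/pi) * arctan(1 / sum_i w_i(x)): zero at sampled points,
    growing in unexplored regions. An assumption based on GLIS, for
    illustration only."""
    d2 = torch.cdist(x, X_train).square().clamp_min(1e-12)
    w_sum_recipr = 1.0 / (1.0 / d2).sum(-1)
    return (2.0 / math.pi) * torch.atan(w_sum_recipr)


X_train = torch.tensor([[-2.61], [-1.92], [-0.63], [0.38], [2.0]])
x = torch.linspace(-3, 3, 7).view(-1, 1)
print(idw_distance_sketch(x, X_train))  # ~0 near training points, larger away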
55 changes: 55 additions & 0 deletions tests/test_nonmyopic.py
@@ -0,0 +1,55 @@
import unittest

import torch
from botorch.acquisition import ExpectedImprovement, qExpectedImprovement

from globopt import GaussHermiteSampler, Ms, Rbf, make_idw_acq_factory
from globopt.problems import SimpleProblem


class TestNonMyopicAcquisitionFunction(unittest.TestCase):
def test_make_idw_acq_factory(self):
c1, c2, span_Y_min = 1.0, 0.5, 1e-3
factory = make_idw_acq_factory(c1, c2, span_Y_min)
self.assertTrue(callable(factory))
self.assertDictEqual(factory(), {"c1": c1, "c2": c2, "span_Y_min": span_Y_min})

def test_init__overrides_default_samplers__with_base_MC_acq_func(self):
problem = SimpleProblem()
X = torch.as_tensor([-2.61, -1.92, -0.63, 0.38, 2], device="cpu").unsqueeze(-1)
Y = problem(X)
mdl = Rbf(X, Y, 0.5)
fantasies_samplers = [GaussHermiteSampler(torch.Size([1]))]
valfunc_sampler = GaussHermiteSampler(torch.Size([16]))

acqfun = Ms(
mdl,
fantasies_samplers,
qExpectedImprovement,
valfunc_sampler=valfunc_sampler,
)

self.assertTrue(all(s is valfunc_sampler for s in acqfun.inner_samplers))

def test_init__does_not_override_default_samplers__with_base_analytical_acq_func(
self,
):
problem = SimpleProblem()
X = torch.as_tensor([-2.61, -1.92, -0.63, 0.38, 2], device="cpu").unsqueeze(-1)
Y = problem(X)
mdl = Rbf(X, Y, 0.5)
fantasies_samplers = [GaussHermiteSampler(torch.Size([1]))]
valfunc_sampler = GaussHermiteSampler(torch.Size([16]))

acqfun = Ms(
mdl,
fantasies_samplers,
ExpectedImprovement,
valfunc_sampler=valfunc_sampler,
)

self.assertTrue(all(s is None for s in acqfun.inner_samplers))


if __name__ == "__main__":
unittest.main()
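
The two tests above pin down a simple dispatch rule: the value-function sampler is propagated to the inner acquisition only when the base acquisition is Monte Carlo-based, while analytical acquisitions get no sampler. A minimal sketch of such a rule with a hypothetical `pick_inner_sampler` helper (the actual logic inside `Ms` may differ):

from typing import Optional

from botorch.acquisition import ExpectedImprovement, qExpectedImprovement
from botorch.acquisition.monte_carlo import MCAcquisitionFunction
from botorch.sampling.base import MCSampler


def pick_inner_sampler(
    base_acq_cls: type, valfunc_sampler: Optional[MCSampler]
) -> Optional[MCSampler]:
    """Hypothetical: MC acquisitions receive the value-function sampler,
    analytical ones receive none."""
    if issubclass(base_acq_cls, MCAcquisitionFunction):
        return valfunc_sampler
    return None


sampler = object()  # stands in for a GaussHermiteSampler instance
assert pick_inner_sampler(qExpectedImprovement, sampler) is sampler
assert pick_inner_sampler(ExpectedImprovement, sampler) is None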
8 changes: 3 additions & 5 deletions tests/test_problems.py
@@ -95,15 +95,13 @@ def test_optimal_value_and_point(self, cls: type):
actual, expected, rtol=1e-4, atol=1e-6, msg=f"{name} f_opt"
)

if (
not isinstance(problem, HyperTuningGridTestFunction)
and problem._optimizers is not None
):
if problem._optimizers is not None:
tol = 1e0 if isinstance(problem, HyperTuningGridTestFunction) else 1e-4
for i, x_opt in enumerate(problem._optimizers):
f_computed = problem(torch.as_tensor(x_opt))
expected_ = torch.as_tensor(expected).view_as(f_computed).to(f_computed)
torch.testing.assert_close(
f_computed, expected_, rtol=1e-4, atol=1e-6, msg=f"{name} x_opt {i}"
f_computed, expected_, rtol=tol, atol=tol, msg=f"{name} x_opt {i}"
)


88 changes: 70 additions & 18 deletions tests/test_regression.py
@@ -1,33 +1,85 @@
import pickle
import unittest

import torch
from scipy.io import loadmat
from parameterized import parameterized

from globopt import Idw, Rbf
from globopt.problems import SimpleProblem
from globopt.regression import Idw, Rbf

RESULTS = loadmat("tests/data_test_core.mat")
with open(r"tests/data_test_regression.pkl", "rb") as f:
RESULTS = pickle.load(f)


class TestRegression(unittest.TestCase):
def test_fit_and_partial_fit(self) -> None:
def test_fit__and__partial_fit(self) -> None:
problem = SimpleProblem()
X = torch.as_tensor([-2.61, -1.92, -0.63, 0.38, 2], device="cpu").unsqueeze(-1)
X = torch.as_tensor([-2.61, -1.92, -0.63, 0.38, 2], device="cpu").view(-1, 1)
Y = problem(X)
n = 3
mdls = [Idw(X[:n], Y[:n]), Rbf(X[:n], Y[:n], eps=0.5, svd_tol=0.0)]
for i in range(len(mdls)):
mdl = mdls[i]
if isinstance(mdl, Idw):
mdls[i] = Idw(X, Y)
else:
mdls[i] = Rbf(X, Y, mdl.eps, mdl.svd_tol, mdl.state)
x_hat = torch.linspace(-3, 3, 100, dtype=X.dtype).view(1, -1, 1)
y_hat = torch.stack([mdl.posterior(x_hat).mean.squeeze() for mdl in mdls])
y_hat_expected = torch.as_tensor(RESULTS["y_hat"][:2], dtype=y_hat.dtype)
# NOTE: we only take the first 2 rows of y_hat because the third was computed
# with a kernel that was later removed.
torch.testing.assert_close(y_hat, y_hat_expected)

# fit
idw_mdl = Idw(X[..., :n, :], Y[..., :n, :])
rbf_mdl = Rbf(X[..., :n, :], Y[..., :n, :], eps=0.5, svd_tol=0.0)

# partial fit
idw_mdl = Idw(X, Y)
rbf_mdl = Rbf(X, Y, rbf_mdl.eps, rbf_mdl.svd_tol, rbf_mdl.state)

x_hat = torch.linspace(-3, 3, RESULTS["N"], dtype=X.dtype).view(1, -1, 1)
y_hat_idw = idw_mdl.posterior(x_hat).mean.squeeze()
y_hat_rbf = rbf_mdl.posterior(x_hat).mean.squeeze()
torch.testing.assert_close(
y_hat_idw, torch.as_tensor(RESULTS["idw"], dtype=y_hat_idw.dtype), msg="idw"
)
torch.testing.assert_close(
y_hat_rbf, torch.as_tensor(RESULTS["rbf"], dtype=y_hat_rbf.dtype), msg="rbf"
)

def test_fit__and__partial_fit__with_repeated_datapoint(self) -> None:
        # when a datapoint is repeated, IDW is expected to keep working, though
        # its output will change. For RBF not to fail, svd_tol must be set; the
        # repeated point is then ignored and the output stays the same
problem = SimpleProblem()
X = torch.as_tensor([-2.61, -1.92, -0.63, 0.38, 2, 2], device="cpu")
        X = X.view(1, -1, 1)  # to avoid issues with torch.jit.script in rbf
Y = problem(X)
n = 3

# fit
idw_mdl = Idw(X[..., :n, :], Y[..., :n, :])
rbf_mdl = Rbf(X[..., :n, :], Y[..., :n, :], eps=0.5, svd_tol=1e-8)

# partial fit
idw_mdl = Idw(X, Y)
rbf_mdl = Rbf(X, Y, rbf_mdl.eps, rbf_mdl.svd_tol, rbf_mdl.state)

x_hat = torch.linspace(-3, 3, RESULTS["N"], dtype=X.dtype).view(1, -1, 1)
y_hat_idw = idw_mdl.posterior(x_hat).mean.squeeze()
y_hat_rbf = rbf_mdl.posterior(x_hat).mean.squeeze()
torch.testing.assert_close(
y_hat_idw,
torch.as_tensor(RESULTS["idw_repeated"], dtype=y_hat_idw.dtype),
msg="idw",
)
torch.testing.assert_close(
y_hat_rbf, torch.as_tensor(RESULTS["rbf"], dtype=y_hat_rbf.dtype), msg="rbf"
)

@parameterized.expand([(Rbf,), (Idw,)])
def test_condition_on_observations__works_as_intended(self, cls: type):
dim, N, M, fantasies = torch.randint(2, 10, (4,))
X_train = torch.randn((N, dim))
Y_train = torch.randn((N, 1))
mdl = cls(X_train, Y_train)

X = torch.randn((fantasies, M, dim))
Y = torch.randn((fantasies, M, 1))
mdl_new = mdl.condition_on_observations(X, Y)

self.assertIsInstance(mdl_new, cls)
self.assertEqual(mdl_new.train_X.shape, (fantasies, N + M, dim))
self.assertEqual(mdl_new.train_Y.shape, (fantasies, N + M, 1))


if __name__ == "__main__":
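The `condition_on_observations` test above encodes a broadcasting contract: a model fit on unbatched `(N, dim)` training data, conditioned on `(fantasies, M, dim)` observations, yields a batched model with `(fantasies, N + M, dim)` training data. A sketch of the tensor bookkeeping presumably involved (illustrative only, not globopt's implementation):

import torch

N, M, dim, fantasies = 6, 3, 2, 4
X_train, Y_train = torch.randn(N, dim), torch.randn(N, 1)
X_new, Y_new = torch.randn(fantasies, M, dim), torch.randn(fantasies, M, 1)

# broadcast the original data over the fantasy batch dimension, then append
# the new observations along the points dimension
X_cond = torch.cat((X_train.expand(fantasies, N, dim), X_new), dim=-2)
Y_cond = torch.cat((Y_train.expand(fantasies, N, 1), Y_new), dim=-2)

assert X_cond.shape == (fantasies, N + M, dim)
assert Y_cond.shape == (fantasies, N + M, 1)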
51 changes: 51 additions & 0 deletions tests/test_sampling.py
@@ -0,0 +1,51 @@
import unittest
from math import pi, sqrt

import torch
from botorch.posteriors import GPyTorchPosterior
from gpytorch.distributions import MultivariateNormal

from globopt import GaussHermiteSampler


class TestSampling(unittest.TestCase):
def test_GH__supports_only_single_dim(self) -> None:
with self.assertRaisesRegex(
AssertionError, "Only a single dimension is supported."
):
GaussHermiteSampler(torch.Size([1000, 100]))

def test_GH__returns_correct_base_samples(self) -> None:
        # with such a posterior, the base samples are returned as they are, i.e., 1*s + 0
distribution = MultivariateNormal(torch.zeros((1,)), torch.eye(1))
posterior = GPyTorchPosterior(distribution)
SQRT2 = sqrt(2.0)
SQRTPI = sqrt(pi)
EXPECTED = {
2: ([-0.707107, 0.707107], [0.886227, 0.886227]),
3: ([-1.22474, 0, 1.22474], [0.295409, 1.18164, 0.295409]),
4: (
[-1.65068, -0.524648, 0.524648, 1.65068],
[0.0813128, 0.804914, 0.804914, 0.0813128],
),
5: (
[-2.02018, -0.958572, 0, 0.958572, 2.02018],
[0.0199532, 0.393619, 0.945309, 0.393619, 0.0199532],
),
}

for n, (samples, weights) in EXPECTED.items():
sampler = GaussHermiteSampler(torch.Size([n]))
actual_samples = sampler(posterior)
torch.testing.assert_close(
actual_samples.flatten(), torch.tensor(samples) * SQRT2, msg=f"{n}"
)
torch.testing.assert_close(
sampler.base_weights.flatten(),
torch.tensor(weights) / SQRTPI,
msg=f"{n}",
)


if __name__ == "__main__":
unittest.main()
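
The sqrt(2) factor on the nodes and the 1/sqrt(pi) factor on the weights in the expected values above come from the change of variables that adapts Gauss-Hermite quadrature (built for integrals against exp(-t^2)) to Gaussian expectations: for X ~ N(mu, sigma^2), E[f(X)] is approximately sum_i (w_i / sqrt(pi)) * f(mu + sigma * sqrt(2) * t_i). A quick numerical check of that identity, independent of globopt:

from math import pi, sqrt

import numpy as np
import torch

# Gauss-Hermite nodes/weights for the weight function exp(-t^2)
t, w = map(torch.as_tensor, np.polynomial.hermite.hermgauss(10))

# change of variables: E[f(X)] for X ~ N(mu, sigma^2), here with f = cos
mu, sigma = 0.3, 1.5
approx = (w / sqrt(pi) * torch.cos(mu + sigma * sqrt(2.0) * t)).sum()

# closed form: E[cos(X)] = cos(mu) * exp(-sigma^2 / 2)
exact = torch.tensor(np.cos(mu) * np.exp(-(sigma**2) / 2))
torch.testing.assert_close(approx, exact)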
