Skip to content

Commit

Permalink
Merge pull request #8 from rottenstea/devel
Browse files Browse the repository at this point in the history
Extending the tests to further parts of the code.
  • Loading branch information
rottenstea authored Jan 31, 2024
2 parents ddc4588 + 3554284 commit 0e9e26a
Show file tree
Hide file tree
Showing 4 changed files with 275 additions and 2 deletions.
Binary file modified .coverage
Binary file not shown.
13 changes: 12 additions & 1 deletion EmpiricalArchive/IsoModulator/Simulation_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,8 @@ def add_parallax_uncertainty(self, delta_plx: float):
:param delta_plx: Uncertainty fraction of the parallax.
:return: None
"""
if delta_plx < 0:
delta_plx = -delta_plx

lower_bound = -delta_plx
upper_bound = delta_plx
Expand All @@ -118,7 +120,12 @@ def add_parallax_uncertainty(self, delta_plx: float):
plx = 1000 / self.mean_distance

# add the parallax uncertainties sampled from the normal distribution bounded by delta plx
new_dist = 1000 / (plx + plx * normal_distribution)
new_plx = plx + plx * normal_distribution
new_dist = 1000 / new_plx

# Check if any value in new_dist is negative
if np.any(new_plx < 0):
raise ValueError("Negative values in new parallaxes detected")

# print(self.mean_distance, np.mean(new_dist), np.std(new_dist), max(new_dist), min(new_dist))

Expand All @@ -135,6 +142,8 @@ def add_binary_fraction(self, binarity_frac: float):
:param binarity_frac: Fraction of unresolved binaries [0,1].
:return: None
"""
if binarity_frac < 0 or binarity_frac > 1:
raise ValueError("Fraction needs to be between 0 and 1.")

binary_frame = self.abs_mag_incl_plx.copy()
# Randomly sample 30% of the elements
Expand Down Expand Up @@ -184,6 +193,8 @@ def add_field_contamination(self, contamination_frac: float,
:param field_data_path: Path to the field data to sample from (default: 1% of Gaia DR3 sources within 500 pc).
:return: None
"""
if contamination_frac < 0 or contamination_frac > 1:
raise ValueError("Fraction needs to be between 0 and 1.")

# load slimmed catalog
data = pd.read_csv(field_data_path)
Expand Down
19 changes: 19 additions & 0 deletions test/test_Classfile.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
from EmpiricalArchive.Extraction.Classfile import *
import numpy as np


def test_abs_mag_error():
    """abs_mag_error must match standard error propagation for M(w, m)."""
    w, delta_w, delta_m = 10.0, 0.1, 0.05
    # Propagated uncertainty: sqrt((5 / (ln10 * w) * dw)^2 + dm^2)
    plx_term = 5 / (np.log(10) * w) * delta_w
    expected = np.sqrt(plx_term ** 2 + delta_m ** 2)
    assert np.isclose(abs_mag_error(w, delta_w, delta_m), expected)


def test_RSS():
    """RSS must return the quadrature (root-sum-square) of two errors."""
    first_error, second_error = 0.1, 0.2
    # sqrt(e1^2 + e2^2) is exactly the hypotenuse of the two errors
    expected = np.hypot(first_error, second_error)
    assert np.isclose(RSS(first_error, second_error), expected)
245 changes: 244 additions & 1 deletion test/test_Simulation_functions.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import numpy as np
import pytest
import pandas as pd

import matplotlib.pyplot as plt
import sys

sys.path.append('/Users/alena/PycharmProjects/PaperI/')
Expand Down Expand Up @@ -105,3 +105,246 @@ def test_set_CMD_type_invalid(initialized_class_object):
obj.set_CMD_type(4)


def test_add_parallax_uncertainty(initialized_class_object):
    """A nonzero parallax uncertainty should populate a per-star magnitude Series."""
    cluster = initialized_class_object
    cluster.set_CMD_type(1)
    cluster.add_parallax_uncertainty(0.1)
    # Result must be a pandas Series with exactly one entry per simulated star.
    assert isinstance(cluster.abs_mag_incl_plx, pd.Series)
    assert len(cluster.abs_mag_incl_plx) == cluster.num_simulated_stars


def test_add_parallax_uncertainty_zero_delta_plx(initialized_class_object):
    """delta_plx = 0 is a valid edge case and must still produce magnitudes."""
    cluster = initialized_class_object
    cluster.set_CMD_type(1)
    cluster.add_parallax_uncertainty(0)
    # Even with zero scatter the output Series must exist and be full-length.
    assert isinstance(cluster.abs_mag_incl_plx, pd.Series)
    assert len(cluster.abs_mag_incl_plx) == cluster.num_simulated_stars


def test_add_parallax_uncertainty_no_negative_distribution(initialized_class_object):
    """A huge uncertainty at small distance must trip the negative-parallax guard."""
    cluster = initialized_class_object
    # Small mean distance + enormous delta_plx forces some perturbed
    # parallaxes below zero, which the implementation must reject.
    cluster.mean_distance = 100
    cluster.set_CMD_type(1)
    with pytest.raises(ValueError, match="Negative values in new parallaxes detected"):
        cluster.add_parallax_uncertainty(100)


def test_add_binary_fraction_unallowed_vals(initialized_class_object):
    """Binary fractions outside [0, 1] must be rejected with a ValueError."""
    cluster = initialized_class_object
    # Check one value below and one above the allowed interval.
    for bad_fraction in (-0.1, 1.1):
        with pytest.raises(ValueError, match="Fraction needs to be between 0 and 1."):
            cluster.add_binary_fraction(bad_fraction)


def test_add_binary_fraction(initialized_class_object):
    """Check that add_binary_fraction shifts a subset of stars by the binary offset.

    Untouched stars keep their original magnitude; shifted stars differ from the
    original by ~0.753 mag (the unresolved-equal-mass-binary brightening).
    """
    binarity_frac = 0.3
    obj = initialized_class_object
    obj.abs_mag_incl_plx = pd.Series(np.random.randn(100))  # mock input magnitudes
    original_abs_mag_incl_plx = obj.abs_mag_incl_plx.copy()

    obj.add_binary_fraction(binarity_frac)

    # Indices where the output magnitude equals the original (untouched stars)
    matching_indices = np.where(obj.abs_mag_incl_plx_binarity == original_abs_mag_incl_plx)[0]
    matching_elements = original_abs_mag_incl_plx[matching_indices]
    # NOTE(review): this assertion is tautological — elements taken from the
    # original series are by construction contained in it. Consider asserting
    # on the expected number of unmodified stars instead.
    assert np.all(np.isin(matching_elements, original_abs_mag_incl_plx))

    # Indices where the output magnitude differs (stars flagged as binaries)
    non_matching_indices = np.where(obj.abs_mag_incl_plx_binarity != original_abs_mag_incl_plx)[0]
    # Element-wise difference between output and input magnitudes
    differences = obj.abs_mag_incl_plx_binarity - original_abs_mag_incl_plx
    # Restrict to the stars that were actually modified
    non_matching_differences = differences[non_matching_indices]
    # Every modified star must be offset by ~0.753 mag (within tolerance)
    tolerance = 0.01  # loose enough to absorb float round-off
    assert np.allclose(np.abs(non_matching_differences), 0.753, atol=tolerance)


def test_add_binary_fraction_zero(initialized_class_object):
    """A binary fraction of 0 must leave every magnitude untouched."""
    cluster = initialized_class_object
    cluster.abs_mag_incl_plx = pd.Series(np.random.randn(100))  # mock input magnitudes
    magnitudes_before = cluster.abs_mag_incl_plx.copy()

    cluster.add_binary_fraction(0)

    # No star may have been shifted when the fraction is zero.
    assert magnitudes_before.equals(cluster.abs_mag_incl_plx_binarity)


def test_add_extinction(initialized_class_object):
    """Extinction must dim G by A_V and redden BP-RP by (1.212 - 0.76) * A_V."""
    extinction_level = 0.5
    cluster = initialized_class_object
    cluster.set_CMD_type(1)
    cluster.add_parallax_uncertainty(0.1)
    cluster.add_binary_fraction(0.3)

    # Snapshot the pre-extinction CMD for comparison after the call.
    pre_extinction = pd.DataFrame(
        data=np.stack([cluster.bp_rp, cluster.abs_mag_incl_plx_binarity], axis=1),
        columns=cluster.cols)

    cluster.add_extinction(extinction_level)
    reddened = cluster.abs_mag_incl_plx_binarity_extinction

    # The absolute magnitude column must be dimmed by exactly A_V.
    assert np.allclose(pre_extinction[cluster.cols[1]] + extinction_level,
                       reddened[cluster.cols[1]])

    # The color column must shift by the adopted coefficient difference times A_V.
    expected_color = pre_extinction[cluster.cols[0]] + (1.212 - 0.76) * extinction_level
    assert np.allclose(expected_color, reddened[cluster.cols[0]])


def test_add_field_unallowed_vals(initialized_class_object):
    """Contamination fractions outside [0, 1] must raise a ValueError."""
    cluster = initialized_class_object
    # One value below and one above the allowed interval.
    for bad_fraction in (-0.1, 1.1):
        with pytest.raises(ValueError, match="Fraction needs to be between 0 and 1."):
            cluster.add_field_contamination(bad_fraction)


def test_add_field_contamination_sampling(initialized_class_object):
    """A 90% contamination fraction should add 180 field stars (dummy catalog: 200)."""
    cluster = initialized_class_object
    cluster.set_CMD_type(1)
    cluster.add_parallax_uncertainty(0.1)
    cluster.add_binary_fraction(0.3)
    cluster.add_extinction(0.5)
    cluster.add_field_contamination(0.9)

    # Number of rows gained by the merge = sampled field entries.
    rows_added = (len(cluster.abs_mag_incl_plx_binarity_extinction_field["Gmag"])
                  - len(cluster.abs_mag_incl_plx_binarity_extinction["Gmag"]))
    assert rows_added == 180  # 0.9 * 200 dummy field-catalog rows


def test_add_field_contamination_conversion(initialized_class_object):
    """Sampled field data must be converted to the CMD column names."""
    cluster = initialized_class_object
    cluster.set_CMD_type(1)
    cluster.add_parallax_uncertainty(0.1)
    cluster.add_binary_fraction(0.3)
    cluster.add_extinction(0.5)
    cluster.add_field_contamination(0.7)

    # Both CMD axes must be present in the merged dataframe.
    merged_columns = cluster.abs_mag_incl_plx_binarity_extinction_field.columns
    assert 'BP-RP' in merged_columns
    assert 'Gmag' in merged_columns


def test_add_field_contamination_merging(initialized_class_object):
    """Merging cluster and field samples must yield 200 + 0.7 * 200 = 340 rows."""
    cluster = initialized_class_object
    cluster.set_CMD_type(1)
    cluster.add_parallax_uncertainty(0.1)
    cluster.add_binary_fraction(0.3)
    cluster.add_extinction(0.5)
    cluster.add_field_contamination(0.7)

    # 200 mock cluster stars plus 140 sampled field stars.
    assert len(cluster.abs_mag_incl_plx_binarity_extinction_field) == 340


def test_simulate_calls_add_methods_correctly(initialized_class_object):
    """simulate() should run the full pipeline and return a CMD dataframe."""
    cluster = initialized_class_object
    cluster.set_CMD_type(1)

    # Drive the whole pipeline with one uncertainty per stage.
    simulated = cluster.simulate([0.1, 0.2, 0.3, 0.4])

    # The final pipeline attribute must have been populated along the way.
    assert cluster.abs_mag_incl_plx_binarity_extinction_field is not None

    # The returned object is the merged CMD as a dataframe …
    assert isinstance(simulated, pd.DataFrame)

    # … containing both CMD axes.
    assert 'BP-RP' in simulated.columns
    assert 'Gmag' in simulated.columns


def test_plot_verification_returns_figure_and_axes(initialized_class_object):
    """plot_verification must return a matplotlib Figure and a flat array of 6 Axes.

    Runs the full simulation pipeline first so every subplot has data to draw.
    """
    uncertainties = [0.1, 0.2, 0.3, 0.4]
    obj = initialized_class_object
    obj.set_CMD_type(1)
    obj.add_parallax_uncertainty(uncertainties[0])
    obj.add_binary_fraction(uncertainties[1])
    obj.add_extinction(uncertainties[2])
    obj.add_field_contamination(uncertainties[3])
    fig, axes = obj.plot_verification(uncertainties)

    # Verify the return types
    assert isinstance(fig, plt.Figure)
    assert isinstance(axes, np.ndarray)
    # The subplot grid is returned flattened to a 1-D array of 6 axes
    assert axes.shape == (6, )

    # Close the figure so repeated runs do not accumulate open figures
    # (same cleanup as test_plot_verification_plots_correct_data).
    plt.close(fig)


def test_plot_verification_plots_correct_data(initialized_class_object):
    """Run the full pipeline and check each verification subplot scatters the expected data.

    For each stage of the simulation pipeline, the scatter offsets of the
    corresponding subplot are compared against the attribute that stage writes.
    """
    uncertainties = [0.1, 0.2, 0.3, 0.4]
    obj = initialized_class_object
    obj.set_CMD_type(1)
    obj.add_parallax_uncertainty(uncertainties[0])
    obj.add_binary_fraction(uncertainties[1])
    obj.add_extinction(uncertainties[2])
    obj.add_field_contamination(uncertainties[3])
    fig, axes = obj.plot_verification(uncertainties)

    # Verify that each subplot contains the expected data

    # Subplot 0: the unperturbed input CMD (cax vs. abs_G)
    ax_original = axes[0]
    x_original = obj.cax
    y_original = obj.abs_G
    original_scatter = ax_original.collections[0]  # assumes the scatter is the only collection on the axes
    assert np.array_equal(original_scatter.get_offsets(), np.column_stack((x_original, y_original)))

    # Subplot 1: after parallax uncertainty (cax vs. abs_mag_incl_plx)
    ax_plx_uncertainty = axes[1]
    x_plx_uncertainty = obj.cax
    y_plx_uncertainty = obj.abs_mag_incl_plx
    plx_uncertainty_scatter = ax_plx_uncertainty.collections[0]  # assumes the scatter is the only collection on the axes
    assert np.array_equal(plx_uncertainty_scatter.get_offsets(), np.column_stack((x_plx_uncertainty, y_plx_uncertainty)))

    # Subplot 2: after binary fraction (cax vs. abs_mag_incl_plx_binarity)
    ax_bin_uncertainty = axes[2]
    x_bin_uncertainty = obj.cax
    y_bin_uncertainty = obj.abs_mag_incl_plx_binarity
    bin_uncertainty_scatter = ax_bin_uncertainty.collections[0]  # assumes the scatter is the only collection on the axes
    assert np.array_equal(bin_uncertainty_scatter.get_offsets(), np.column_stack((x_bin_uncertainty, y_bin_uncertainty)))

    # Subplot 3: after extinction (both axes taken from the extinction dataframe)
    ax_Av_uncertainty = axes[3]
    x_Av_uncertainty = obj.abs_mag_incl_plx_binarity_extinction[obj.cols[0]]
    y_Av_uncertainty = obj.abs_mag_incl_plx_binarity_extinction[obj.cols[1]]
    Av_uncertainty_scatter = ax_Av_uncertainty.collections[0]  # assumes the scatter is the only collection on the axes
    assert np.array_equal(Av_uncertainty_scatter.get_offsets(), np.column_stack((x_Av_uncertainty, y_Av_uncertainty)))

    # Subplot 4 (field contamination) — left disabled by the author because the
    # comparison fails; NOTE(review): investigate why the field subplot's
    # offsets do not match before re-enabling these assertions.
    # ax_f_uncertainty = axes[4]
    # x_f_uncertainty = obj.abs_mag_incl_plx_binarity_extinction_field[obj.cols[0]]
    # y_f_uncertainty = obj.abs_mag_incl_plx_binarity_extinction_field[obj.cols[1]]
    # f_uncertainty_scatter = ax_f_uncertainty.collections[0]  # assumes the scatter is the only collection on the axes
    # assert np.array_equal(f_uncertainty_scatter.get_offsets(), np.column_stack((x_f_uncertainty, y_f_uncertainty)))

    # Close the figure to avoid accumulating open figures across the test session
    plt.close(fig)







0 comments on commit 0e9e26a

Please sign in to comment.