Autoformat with black
brainless-bot[bot] committed Sep 5, 2024
1 parent 7df0b5d commit b524f8b
Showing 7 changed files with 48 additions and 10 deletions.
8 changes: 7 additions & 1 deletion examples/example_spine_statistics.py
@@ -1,7 +1,13 @@
from auxiliary.nifti.io import read_nifti
from auxiliary.turbopath import turbopath

from panoptica import Panoptica_Evaluator, Panoptica_Aggregator, InputType, NaiveThresholdMatching, Metric
from panoptica import (
Panoptica_Evaluator,
Panoptica_Aggregator,
InputType,
NaiveThresholdMatching,
Metric,
)
from panoptica.utils import SegmentationClassGroups, LabelGroup
from panoptica.panoptica_statistics import make_curve_over_setups
from pathlib import Path
8 changes: 7 additions & 1 deletion panoptica/_functionals.py
@@ -150,4 +150,10 @@ def _get_paired_crop(


def _round_to_n(value: float | int, n_significant_digits: int = 2):
return value if value == 0 else round(value, -int(math.floor(math.log10(abs(value)))) + (n_significant_digits - 1))
return (
value
if value == 0
else round(
value, -int(math.floor(math.log10(abs(value)))) + (n_significant_digits - 1)
)
)
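
For reference, a short standalone sketch (not part of this commit) of what the reformatted helper computes; the values below are worked examples:

import math

def _round_to_n(value: float | int, n_significant_digits: int = 2):
    # round to n significant digits; zero is returned unchanged
    return (
        value
        if value == 0
        else round(
            value, -int(math.floor(math.log10(abs(value)))) + (n_significant_digits - 1)
        )
    )

print(_round_to_n(0.004567))  # 0.0046 (two significant digits)
print(_round_to_n(1234))      # 1200   (two significant digits)
print(_round_to_n(0))         # 0      (returned unchanged)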
6 changes: 5 additions & 1 deletion panoptica/instance_evaluator.py
@@ -5,6 +5,7 @@
from panoptica.utils.processing_pair import MatchedInstancePair, EvaluateInstancePair
from panoptica._functionals import _get_paired_crop


def evaluate_matched_instance(
matched_instance_pair: MatchedInstancePair,
eval_metrics: list[Metric] = [Metric.DSC, Metric.IOU, Metric.ASSD],
@@ -38,7 +39,10 @@ def evaluate_matched_instance(
)
ref_matched_labels = matched_instance_pair.matched_instances

instance_pairs = [(reference_arr, prediction_arr, ref_idx, eval_metrics) for ref_idx in ref_matched_labels]
instance_pairs = [
(reference_arr, prediction_arr, ref_idx, eval_metrics)
for ref_idx in ref_matched_labels
]

# metric_dicts: list[dict[Metric, float]] = [_evaluate_instance(*i) for i in instance_pairs]
with Pool() as pool:
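The hunk above builds one argument tuple per matched reference label and, per the commented-out serial variant, evaluates them in a multiprocessing Pool; below is a minimal standalone sketch of that fan-out pattern, with a hypothetical worker and toy data rather than the actual _evaluate_instance signature:

from multiprocessing import Pool

def evaluate_one(reference: dict, prediction: dict, label: int) -> dict:
    # hypothetical worker: check whether one label agrees in both inputs
    return {"label": label, "agree": reference[label] == prediction[label]}

if __name__ == "__main__":
    reference = {1: "a", 2: "b"}
    prediction = {1: "a", 2: "c"}
    instance_pairs = [(reference, prediction, label) for label in (1, 2)]
    with Pool() as pool:
        # starmap unpacks each tuple into the worker's positional arguments
        results = pool.starmap(evaluate_one, instance_pairs)
    print(results)  # [{'label': 1, 'agree': True}, {'label': 2, 'agree': False}]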
5 changes: 4 additions & 1 deletion panoptica/panoptica_aggregator.py
@@ -14,6 +14,7 @@

COMPUTATION_TIME_KEY = "computation_time"


#
class Panoptica_Aggregator:
# internal_list_lock = Lock()
@@ -84,7 +85,9 @@ def __init__(
continue_file = True
else:
# TODO should also hash panoptica_evaluator just to make sure! and then save into header of file
assert header_hash == hash("+".join(header_list)), "Hash of header not the same! You are using a different setup!"
assert header_hash == hash(
"+".join(header_list)
), "Hash of header not the same! You are using a different setup!"

if out_buffer_file.exists():
os.remove(out_buffer_file)
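The assertion reformatted above guards against appending rows with a different column layout than the existing output file; a minimal sketch of that header-consistency check, using illustrative column names rather than the aggregator's real ones:

header_list = ["subject_name", "num_pred_instances", "sq_dsc"]  # illustrative columns
header_hash = hash("+".join(header_list))  # recorded when the output file is first written

# any later write must use an identical header, otherwise the assert fires
assert header_hash == hash(
    "+".join(header_list)
), "Hash of header not the same! You are using a different setup!"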
13 changes: 11 additions & 2 deletions panoptica/panoptica_evaluator.py
@@ -19,7 +19,12 @@
)
import numpy as np
from panoptica.utils.config import SupportsConfig
from panoptica.utils.segmentation_class import SegmentationClassGroups, LabelGroup, _NoSegmentationClassGroups
from panoptica.utils.segmentation_class import (
SegmentationClassGroups,
LabelGroup,
_NoSegmentationClassGroups,
)


class Panoptica_Evaluator(SupportsConfig):

@@ -132,7 +137,11 @@ def evaluate(
label_group,
processing_pair,
result_all,
save_group_times=self.__save_group_times if save_group_times is None else save_group_times,
save_group_times=(
self.__save_group_times
if save_group_times is None
else save_group_times
),
log_times=log_times,
verbose=verbose,
)[1:]
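The evaluate() change simply wraps an existing conditional expression in parentheses; a minimal sketch of that "explicit argument wins, None falls back to the instance default" pattern, with illustrative names rather than the Panoptica_Evaluator API:

class Evaluator:
    def __init__(self, save_group_times: bool = False):
        self.__save_group_times = save_group_times  # instance-level default

    def evaluate(self, save_group_times: bool | None = None) -> bool:
        # an explicit argument overrides; None means "use the instance default"
        return (
            self.__save_group_times
            if save_group_times is None
            else save_group_times
        )

print(Evaluator(save_group_times=True).evaluate())       # True  (instance default)
print(Evaluator(save_group_times=True).evaluate(False))  # False (explicit override)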
9 changes: 7 additions & 2 deletions panoptica/panoptica_result.py
Expand Up @@ -438,10 +438,15 @@ def __getattribute__(self, __name: str) -> Any:
raise e
if __name == "_evaluation_metrics":
return attr
if object.__getattribute__(self, "_evaluation_metrics") is not None and __name in self._evaluation_metrics.keys():
if (
object.__getattribute__(self, "_evaluation_metrics") is not None
and __name in self._evaluation_metrics.keys()
):
if attr is None:
if self._evaluation_metrics[__name]._error:
raise MetricCouldNotBeComputedException(f"Requested metric {__name} that could not be computed")
raise MetricCouldNotBeComputedException(
f"Requested metric {__name} that could not be computed"
)
elif not self._evaluation_metrics[__name]._was_calculated:
value = self._calc_metric(__name)
setattr(self, __name, value)
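The __getattribute__ override touched above computes metrics on first access and raises when a metric is known to have failed; below is a minimal standalone sketch of just the lazy-access part of that pattern, with an illustrative class rather than the Panoptica_Result API:

class LazyMetrics:
    def __init__(self):
        # metrics start out uncomputed (None) and are filled in on first access
        self._metrics = {"dsc": None, "iou": None}

    def __getattribute__(self, name):
        metrics = object.__getattribute__(self, "_metrics")
        if name in metrics:
            if metrics[name] is None:
                # compute lazily, then cache the value
                metrics[name] = object.__getattribute__(self, "_compute")(name)
            return metrics[name]
        return object.__getattribute__(self, name)

    def _compute(self, name):
        return {"dsc": 0.9, "iou": 0.8}[name]

m = LazyMetrics()
print(m.dsc)  # computed on first access -> 0.9
print(m.iou)  # computed on first access -> 0.8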
9 changes: 7 additions & 2 deletions panoptica/utils/segmentation_class.py
@@ -5,6 +5,7 @@

NO_GROUP_KEY = "ungrouped"


class SegmentationClassGroups(SupportsConfig):
#
def __init__(
@@ -103,7 +104,9 @@ class _NoSegmentationClassGroups(SegmentationClassGroups):
def __init__(self) -> None:
self.__group_dictionary = {NO_GROUP_KEY: _LabelGroupAny()}

def has_defined_labels_for(self, arr: np.ndarray | list[int], raise_error: bool = False):
def has_defined_labels_for(
self, arr: np.ndarray | list[int], raise_error: bool = False
):
return True

def __str__(self) -> str:
@@ -124,7 +127,9 @@ def keys(self) -> list[str]:

@property
def labels(self):
raise Exception("_NoSegmentationClassGroups has no explicit definition of labels")
raise Exception(
"_NoSegmentationClassGroups has no explicit definition of labels"
)

@classmethod
def _yaml_repr(cls, node):
