diff --git a/docs/source/api.rst b/docs/source/api.rst index 20d1a0ea..cd778cb2 100644 --- a/docs/source/api.rst +++ b/docs/source/api.rst @@ -259,7 +259,6 @@ Others :nosignatures: :template: class.rst - AUSE GroupingLoss Regression @@ -294,6 +293,18 @@ Segmentation MeanIntersectionOverUnion +Others +^^^^^^ + +.. currentmodule:: torch_uncertainty.metrics + +.. autosummary:: + :toctree: generated/ + :nosignatures: + :template: class.rst + + AUSE + Losses ------ diff --git a/docs/source/conf.py b/docs/source/conf.py index 418b398a..c5c3b996 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -15,7 +15,7 @@ f"{datetime.now().year!s}, Adrien Lafage and Olivier Laurent" ) author = "Adrien Lafage and Olivier Laurent" -release = "0.2.2" +release = "0.2.2.post0" # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration diff --git a/docs/source/quickstart.rst b/docs/source/quickstart.rst index 68fc7fd0..b81eb766 100644 --- a/docs/source/quickstart.rst +++ b/docs/source/quickstart.rst @@ -160,10 +160,11 @@ backbone with the following code: .. code:: python - from torch_uncertainty.models.resnet import packed_resnet18 + from torch_uncertainty.models.resnet import packed_resnet - model = packed_resnet18( + model = packed_resnet( in_channels = 3, + arch=18, num_estimators = 4, alpha = 2, gamma = 2, diff --git a/docs/source/references.rst b/docs/source/references.rst index 490e080a..5e6c7425 100644 --- a/docs/source/references.rst +++ b/docs/source/references.rst @@ -254,7 +254,7 @@ For the conflictual loss, consider citing: **On the Calibration of Epistemic Uncertainty: Principles, Paradoxes and Conflictual Loss** * Authors: *Mohammed Fellaji, Frédéric Pennerath, Brieuc Conan-Guez, and Miguel Couceiro* -* Paper: `ArXiv 2024 `__. 
Metrics ------- diff --git a/pyproject.toml b/pyproject.toml index 60ee19af..c82abe13 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "flit_core.buildapi" [project] name = "torch_uncertainty" -version = "0.2.2" +version = "0.2.2.post0" authors = [ { name = "ENSTA U2IS", email = "olivier.laurent@ensta-paris.fr" }, { name = "Adrien Lafage", email = "adrienlafage@outlook.com" }, diff --git a/pyrightconfig.json b/pyrightconfig.json deleted file mode 100644 index 5675fd51..00000000 --- a/pyrightconfig.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "include": [ - "torch_uncertainty", - "tests" - ], - - "exclude": [ - "**/__pycache__", - ], - - "pythonVersion": "3.10", - "pythonPlatform": "Linux", - } diff --git a/tests/metrics/classification/test_sparsification.py b/tests/metrics/test_sparsification.py similarity index 100% rename from tests/metrics/classification/test_sparsification.py rename to tests/metrics/test_sparsification.py diff --git a/tests/test_utils.py b/tests/test_utils.py index 69c17b1d..e6aceb06 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -2,7 +2,7 @@ import pytest import torch -from huggingface_hub.utils._errors import ( +from huggingface_hub.errors import ( HfHubHTTPError, RepositoryNotFoundError, ) diff --git a/torch_uncertainty/baselines/classification/resnet.py b/torch_uncertainty/baselines/classification/resnet.py index 00ea94ce..15d1655d 100644 --- a/torch_uncertainty/baselines/classification/resnet.py +++ b/torch_uncertainty/baselines/classification/resnet.py @@ -1,6 +1,7 @@ from typing import Literal from torch import nn +from torch.optim import Optimizer from torch_uncertainty.models import mc_dropout from torch_uncertainty.models.resnet import ( @@ -55,6 +56,7 @@ def __init__( normalization_layer: type[nn.Module] = nn.BatchNorm2d, num_estimators: int = 1, dropout_rate: float = 0.0, + optim_recipe: dict | Optimizer | None = None, mixup_params: dict | None = None, last_layer_dropout: bool = False, 
width_multiplier: float = 1.0, @@ -229,6 +231,7 @@ def __init__( model=model, loss=loss, is_ensemble=version in ENSEMBLE_METHODS, + optim_recipe=optim_recipe, format_batch_fn=format_batch_fn, mixup_params=mixup_params, eval_ood=eval_ood, diff --git a/torch_uncertainty/baselines/classification/vgg.py b/torch_uncertainty/baselines/classification/vgg.py index 7375a082..f9c29323 100644 --- a/torch_uncertainty/baselines/classification/vgg.py +++ b/torch_uncertainty/baselines/classification/vgg.py @@ -1,6 +1,7 @@ from typing import Literal from torch import nn +from torch.optim import Optimizer from torch_uncertainty.models import mc_dropout from torch_uncertainty.models.vgg import ( @@ -32,6 +33,7 @@ def __init__( num_estimators: int = 1, dropout_rate: float = 0.0, last_layer_dropout: bool = False, + optim_recipe: dict | Optimizer | None = None, mixup_params: dict | None = None, groups: int = 1, alpha: int | None = None, @@ -52,6 +54,10 @@ def __init__( num_classes (int): Number of classes to predict. in_channels (int): Number of input channels. loss (nn.Module): Training loss. + optim_recipe (Any): optimization recipe, corresponds to + what is expected by the `LightningModule.configure_optimizers() + `_ + method. 
version (str): Determines which VGG version to use: @@ -164,6 +170,7 @@ def __init__( loss=loss, is_ensemble=version in ENSEMBLE_METHODS, format_batch_fn=format_batch_fn, + optim_recipe=optim_recipe, mixup_params=mixup_params, eval_ood=eval_ood, ood_criterion=ood_criterion, diff --git a/torch_uncertainty/baselines/classification/wideresnet.py b/torch_uncertainty/baselines/classification/wideresnet.py index 78abe960..b1086200 100644 --- a/torch_uncertainty/baselines/classification/wideresnet.py +++ b/torch_uncertainty/baselines/classification/wideresnet.py @@ -1,6 +1,7 @@ from typing import Literal from torch import nn +from torch.optim import Optimizer from torch_uncertainty.models import mc_dropout from torch_uncertainty.models.wideresnet import ( @@ -39,6 +40,7 @@ def __init__( style: str = "imagenet", num_estimators: int = 1, dropout_rate: float = 0.0, + optim_recipe: dict | Optimizer | None = None, mixup_params: dict | None = None, groups: int = 1, last_layer_dropout: bool = False, @@ -186,6 +188,7 @@ def __init__( loss=loss, is_ensemble=version in ENSEMBLE_METHODS, format_batch_fn=format_batch_fn, + optim_recipe=optim_recipe, mixup_params=mixup_params, eval_ood=eval_ood, eval_grouping_loss=eval_grouping_loss, diff --git a/torch_uncertainty/datamodules/segmentation/cityscapes.py b/torch_uncertainty/datamodules/segmentation/cityscapes.py index ea4bea8e..ad583664 100644 --- a/torch_uncertainty/datamodules/segmentation/cityscapes.py +++ b/torch_uncertainty/datamodules/segmentation/cityscapes.py @@ -105,7 +105,6 @@ def __init__( pin_memory=pin_memory, persistent_workers=persistent_workers, ) - self.dataset = Cityscapes self.mode = "fine" self.crop_size = _pair(crop_size) diff --git a/torch_uncertainty/datasets/segmentation/cityscapes.py b/torch_uncertainty/datasets/segmentation/cityscapes.py index 97e48ef0..a30aa813 100644 --- a/torch_uncertainty/datasets/segmentation/cityscapes.py +++ b/torch_uncertainty/datasets/segmentation/cityscapes.py @@ -1,3 +1,4 @@ +from 
collections.abc import Callable from typing import Any import torch @@ -10,7 +11,35 @@ class Cityscapes(TVCityscapes): - def encode_target(self, target: Image.Image) -> Image.Image: + def __init__( + self, + root: str, + split: str = "train", + mode: str = "fine", + target_type: list[str] | str = "instance", + transform: Callable[..., Any] | None = None, + target_transform: Callable[..., Any] | None = None, + transforms: Callable[..., Any] | None = None, + ) -> None: + super().__init__( + root, + split, + mode, + target_type, + transform, + target_transform, + transforms, + ) + train_id_to_color = [ + c.color + for c in self.classes + if (c.train_id != -1 and c.train_id != 255) + ] + train_id_to_color.append([0, 0, 0]) + self.train_id_to_color = torch.tensor(train_id_to_color) + + @classmethod + def encode_target(cls, target: Image.Image) -> Image.Image: """Encode target image to tensor. Args: @@ -23,7 +52,7 @@ def encode_target(self, target: Image.Image) -> Image.Image: colored_target = rearrange(colored_target, "c h w -> h w c") target = torch.zeros_like(colored_target[..., :1]) # convert target color to index - for cityscapes_class in self.classes: + for cityscapes_class in cls.classes: target[ ( colored_target @@ -33,6 +62,18 @@ def encode_target(self, target: Image.Image) -> Image.Image: return F.to_pil_image(rearrange(target, "h w c -> c h w")) + def decode_target(self, target: torch.Tensor) -> torch.Tensor: + """Decode target tensor of train IDs to an RGB color tensor. + + Args: + target (torch.Tensor): Target tensor of train IDs. + + Returns: + torch.Tensor: Decoded RGB target. + """ + target[target == 255] = -1 + return self.train_id_to_color[target] + def __getitem__(self, index: int) -> tuple[Any, Any]: """Get the sample at the given index. 
diff --git a/torch_uncertainty/metrics/__init__.py b/torch_uncertainty/metrics/__init__.py index 52e55366..2f292f56 100644 --- a/torch_uncertainty/metrics/__init__.py +++ b/torch_uncertainty/metrics/__init__.py @@ -2,7 +2,6 @@ from .classification import ( AUGRC, AURC, - AUSE, FPR95, AdaptiveCalibrationError, BrierScore, @@ -29,3 +28,4 @@ SILog, ThresholdAccuracy, ) +from .sparsification import AUSE diff --git a/torch_uncertainty/metrics/classification/__init__.py b/torch_uncertainty/metrics/classification/__init__.py index 0e454888..840d543b 100644 --- a/torch_uncertainty/metrics/classification/__init__.py +++ b/torch_uncertainty/metrics/classification/__init__.py @@ -17,5 +17,4 @@ RiskAt80Cov, RiskAtxCov, ) -from .sparsification import AUSE from .variation_ratio import VariationRatio diff --git a/torch_uncertainty/metrics/classification/risk_coverage.py b/torch_uncertainty/metrics/classification/risk_coverage.py index dced8409..8ab62d00 100644 --- a/torch_uncertainty/metrics/classification/risk_coverage.py +++ b/torch_uncertainty/metrics/classification/risk_coverage.py @@ -146,7 +146,7 @@ def plot( ax.set_xlabel("Coverage (%)", fontsize=16) ax.set_ylabel("Risk - Error Rate (%)", fontsize=16) ax.set_xlim(0, 100) - ax.set_ylim(0, 100) + ax.set_ylim(0, min(100, np.ceil(error_rates.max() * 100))) ax.set_aspect("equal", "box") ax.legend(loc="upper right") fig.tight_layout() @@ -269,7 +269,7 @@ def plot( ax.set_xlabel("Coverage (%)", fontsize=16) ax.set_ylabel("Generalized Risk (%)", fontsize=16) ax.set_xlim(0, 100) - ax.set_ylim(0, 100) + ax.set_ylim(0, min(100, np.ceil(error_rates.max() * 100))) ax.set_aspect("equal", "box") ax.legend(loc="upper right") fig.tight_layout() diff --git a/torch_uncertainty/metrics/classification/sparsification.py b/torch_uncertainty/metrics/sparsification.py similarity index 96% rename from torch_uncertainty/metrics/classification/sparsification.py rename to torch_uncertainty/metrics/sparsification.py index 82fe41f8..1a55a92b 100644 --- 
a/torch_uncertainty/metrics/classification/sparsification.py +++ b/torch_uncertainty/metrics/sparsification.py @@ -36,7 +36,7 @@ def __init__(self, **kwargs) -> None: Inputs: - :attr:`scores`: Uncertainty scores of shape :math:`(B,)`. A higher score means a higher uncertainty. - - :attr:`errors`: Binary errors of shape :math:`(B,)`, + - :attr:`errors`: Errors of shape :math:`(B,)`, where :math:`B` is the batch size. @@ -52,7 +52,7 @@ def update(self, scores: Tensor, errors: Tensor) -> None: Args: scores (Tensor): uncertainty scores of shape :math:`(B,)` - errors (Tensor): binary errors of shape :math:`(B,)` + errors (Tensor): errors of shape :math:`(B,)` """ self.scores.append(scores) self.errors.append(errors) @@ -149,7 +149,7 @@ def _ause_rejection_rate_compute( Args: scores (Tensor): uncertainty scores of shape :math:`(B,)` - errors (Tensor): binary errors of shape :math:`(B,)` + errors (Tensor): errors of shape :math:`(B,)` """ num_samples = errors.size(0) diff --git a/torch_uncertainty/routines/classification.py b/torch_uncertainty/routines/classification.py index b7e0262b..23da6188 100644 --- a/torch_uncertainty/routines/classification.py +++ b/torch_uncertainty/routines/classification.py @@ -392,7 +392,7 @@ def training_step( loss = self.loss(logits, target, self.current_epoch) if self.needs_step_update: self.model.update_wrapper(self.current_epoch) - self.log("train_loss", loss) + self.log("train_loss", loss, prog_bar=True, logger=True) return loss def validation_step( @@ -501,7 +501,15 @@ def test_step( self.ood_logit_storage.append(logits.detach().cpu()) def on_validation_epoch_end(self) -> None: - self.log_dict(self.val_cls_metrics.compute(), sync_dist=True) + self.log_dict( + self.val_cls_metrics.compute(), logger=True, sync_dist=True + ) + self.log( + "Acc%", + self.val_cls_metrics["cls/Acc"].compute() * 100, + prog_bar=True, + logger=False, + ) self.val_cls_metrics.reset() if self.eval_grouping_loss: diff --git 
a/torch_uncertainty/routines/pixel_regression.py b/torch_uncertainty/routines/pixel_regression.py index b9762ffc..2c36ce52 100644 --- a/torch_uncertainty/routines/pixel_regression.py +++ b/torch_uncertainty/routines/pixel_regression.py @@ -191,7 +191,7 @@ def training_step( if self.needs_step_update: self.model.update_wrapper(self.current_epoch) - self.log("train_loss", loss) + self.log("train_loss", loss, prog_bar=True, logger=True) return loss def validation_step( diff --git a/torch_uncertainty/routines/regression.py b/torch_uncertainty/routines/regression.py index b118590a..cccf712d 100644 --- a/torch_uncertainty/routines/regression.py +++ b/torch_uncertainty/routines/regression.py @@ -167,7 +167,7 @@ def training_step( if self.needs_step_update: self.model.update_wrapper(self.current_epoch) - self.log("train_loss", loss) + self.log("train_loss", loss, prog_bar=True, logger=True) return loss def validation_step( diff --git a/torch_uncertainty/routines/segmentation.py b/torch_uncertainty/routines/segmentation.py index 966553d1..03fa2f58 100644 --- a/torch_uncertainty/routines/segmentation.py +++ b/torch_uncertainty/routines/segmentation.py @@ -170,7 +170,7 @@ def training_step( loss = self.loss(logits[valid_mask], target[valid_mask]) if self.needs_step_update: self.model.update_wrapper(self.current_epoch) - self.log("train_loss", loss) + self.log("train_loss", loss, prog_bar=True, logger=True) return loss def validation_step( @@ -214,7 +214,14 @@ def test_step(self, batch: tuple[Tensor, Tensor], batch_idx: int) -> None: self.test_sbsmpl_seg_metrics.update(*self.subsample(probs, targets)) def on_validation_epoch_end(self) -> None: - self.log_dict(self.val_seg_metrics.compute(), sync_dist=True) + self.log_dict( + self.val_seg_metrics.compute(), logger=True, sync_dist=True + ) + self.log( + "mIoU%", + self.val_seg_metrics["seg/mIoU"].compute() * 100, + prog_bar=True, logger=False, + ) self.log_dict(self.val_sbsmpl_seg_metrics.compute(), sync_dist=True) 
self.val_seg_metrics.reset() self.val_sbsmpl_seg_metrics.reset() diff --git a/torch_uncertainty/utils/hub.py b/torch_uncertainty/utils/hub.py index acb7e3f5..67fe81d3 100644 --- a/torch_uncertainty/utils/hub.py +++ b/torch_uncertainty/utils/hub.py @@ -3,7 +3,7 @@ import torch import yaml from huggingface_hub import hf_hub_download -from huggingface_hub.utils._errors import EntryNotFoundError +from huggingface_hub.errors import EntryNotFoundError from safetensors.torch import load_file