From 962ae11d46e4f78eefcc795553a1be3a28eec1e1 Mon Sep 17 00:00:00 2001
From: v1docq
Date: Tue, 28 Nov 2023 13:46:26 +0300
Subject: [PATCH] Add torch losses to constant

---
 .../settings/constanst_repository.py         | 25 ++++++++++++-------
 .../core/models/nn/network_modules/losses.py | 11 +++++---
 2 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/fedot_ind/core/architecture/settings/constanst_repository.py b/fedot_ind/core/architecture/settings/constanst_repository.py
index 304291403..10885e6cd 100644
--- a/fedot_ind/core/architecture/settings/constanst_repository.py
+++ b/fedot_ind/core/architecture/settings/constanst_repository.py
@@ -6,24 +6,17 @@
 import torch
 from fedot.core.repository.dataset_types import DataTypesEnum
 from torch import nn, Tensor
+
+from fedot_ind.core.models.nn.network_modules.losses import *
 from fedot_ind.core.models.quantile.stat_features import *
 from fedot_ind.core.models.topological.topofeatures import *
 from fedot_ind.core.operation.transformation.data.hankel import HankelMatrix
-from torch.nn.modules import Module
 
 
 def beta_thr(beta):
     return 0.56 * np.power(beta, 3) - 0.95 * np.power(beta, 2) + 1.82 * beta + 1.43
 
 
-class SMAPELoss(Module):
-    def __init__(self):
-        super().__init__()
-
-    def forward(self, input: Tensor, target: Tensor) -> Tensor:
-        return 100 * torch.mean(2 * torch.abs(input - target) / (torch.abs(target) + torch.abs(input)) + 1e-8)
-
-
 class ComputationalConstant(Enum):
     CPU_NUMBERS = math.ceil(cpu_count() * 0.7) if cpu_count() > 1 else 1
 
@@ -145,6 +138,13 @@ class TorchLossesConstant(Enum):
     MULTI_CLASS_CROSS_ENTROPY = nn.BCEWithLogitsLoss
     MSE = nn.MSELoss
     SMAPE = SMAPELoss
+    TWEEDIE_LOSS = TweedieLoss
+    FOCAL_LOSS = FocalLoss
+    CENTER_PLUS_LOSS = CenterPlusLoss
+    CENTER_LOSS = CenterLoss
+    MASK_LOSS = MaskedLossWrapper
+    LOG_COSH_LOSS = LogCoshLoss
+    HUBER_LOSS = HuberLoss
 
 
 STAT_METHODS = FeatureConstant.STAT_METHODS.value
@@ -178,3 +178,10 @@
 MULTI_CLASS_CROSS_ENTROPY = TorchLossesConstant.MULTI_CLASS_CROSS_ENTROPY.value
 MSE = TorchLossesConstant.MSE.value
 SMAPE = TorchLossesConstant.SMAPE.value
+TWEEDIE_LOSS = TorchLossesConstant.TWEEDIE_LOSS.value
+FOCAL_LOSS = TorchLossesConstant.FOCAL_LOSS.value
+CENTER_PLUS_LOSS = TorchLossesConstant.CENTER_PLUS_LOSS.value
+CENTER_LOSS = TorchLossesConstant.CENTER_LOSS.value
+MASK_LOSS = TorchLossesConstant.MASK_LOSS.value
+LOG_COSH_LOSS = TorchLossesConstant.LOG_COSH_LOSS.value
+HUBER_LOSS = TorchLossesConstant.HUBER_LOSS.value
\ No newline at end of file
diff --git a/fedot_ind/core/models/nn/network_modules/losses.py b/fedot_ind/core/models/nn/network_modules/losses.py
index 371cceffa..31b2f8497 100644
--- a/fedot_ind/core/models/nn/network_modules/losses.py
+++ b/fedot_ind/core/models/nn/network_modules/losses.py
@@ -5,7 +5,6 @@
 from fastai.torch_core import Module
 import torch.nn.functional as F
 
-
 
 class HuberLoss(nn.Module):
     """Huber loss
@@ -64,7 +63,6 @@ def forward(self, inp, targ):
         return self.loss(inp, targ)
 
 
-# %% ../nbs/016_losses.ipynb 9
 class CenterLoss(Module):
     r"""
     Code in Pytorch has been slightly modified from: https://github.com/KaiyangZhou/pytorch-center-loss/blob/master/center_loss.py
@@ -142,7 +140,6 @@ def forward(self, x: Tensor, y: Tensor) -> Tensor:
         return loss
 
 
-# %% ../nbs/016_losses.ipynb 14
 class TweedieLoss(Module):
     def __init__(self, p=1.5, eps=1e-8):
         """
@@ -163,3 +160,11 @@ def forward(self, inp, targ):
         b = torch.exp((2 - self.p) * torch.log(inp)) / (2 - self.p)
         loss = -a + b
         return loss.mean()
+
+
+class SMAPELoss(Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, input: Tensor, target: Tensor) -> Tensor:
+        return 100 * torch.mean(2 * torch.abs(input - target) / (torch.abs(target) + torch.abs(input)) + 1e-8)
\ No newline at end of file
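
For reference, a minimal usage sketch (not part of the patch): it resolves one of the newly registered TorchLossesConstant members and applies it as a training criterion. The import path follows the diff above; SMAPE is used because SMAPELoss takes no constructor arguments, so no assumptions about default parameters are needed. The dummy tensors are illustrative only.

    import torch

    # TorchLossesConstant members hold loss *classes* (e.g. SMAPE = SMAPELoss),
    # so the resolved value must be instantiated before use.
    from fedot_ind.core.architecture.settings.constanst_repository import TorchLossesConstant

    criterion = TorchLossesConstant.SMAPE.value()      # -> SMAPELoss()

    preds = torch.randn(8, 1, requires_grad=True)      # dummy model output
    target = torch.randn(8, 1)                         # dummy ground truth

    loss = criterion(preds, target)                    # scalar sMAPE, in percent
    loss.backward()                                    # gradients flow back to preds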