From 2e1e90bf9296e568c93df212f13b02355e03392e Mon Sep 17 00:00:00 2001
From: arnab39
Date: Tue, 12 Mar 2024 15:57:28 -0400
Subject: [PATCH] run autoflake

---
 .pre-commit-config.yaml | 18 +++++++++---------
 equiadapt/common/basecanonicalization.py | 1 -
 .../images/canonicalization/discrete_group.py | 2 +-
 .../custom_equivariant_networks.py | 2 --
 .../nbody/canonicalization/continuous_group.py | 7 -------
 .../euclideangraph_base_models.py | 7 +------
 .../euclideangraph_model.py | 8 +-------
 .../nbody/canonicalization_networks/gcl.py | 2 --
 .../canonicalization_networks/image_model.py | 2 +-
 .../image_networks.py | 1 -
 .../nbody/canonicalization_networks/resnet.py | 2 --
 .../set_base_models.py | 1 -
 .../canonicalization_networks/set_model.py | 3 ---
 .../canonicalization_networks/vn_layers.py | 6 ------
 .../canonicalization/continuous_group.py | 1 -
 .../equivariant_networks.py | 2 +-
 .../images/classification/inference_utils.py | 2 --
 .../images/classification/prepare/__init__.py | 4 ----
 .../classification/prepare/cifar_data.py | 2 +-
 .../classification/prepare/imagenet_data.py | 4 +---
 .../prepare/rotated_mnist_data.py | 1 -
 .../classification/prepare/stl10_data.py | 2 +-
 examples/images/classification/train_utils.py | 2 +-
 examples/images/common/__init__.py | 1 -
 .../images/segmentation/prepare/__init__.py | 1 -
 .../segmentation/prepare/vision_transforms.py | 1 -
 examples/images/segmentation/train_utils.py | 2 +-
 examples/nbody/model.py | 4 +---
 examples/nbody/model_utils.py | 2 --
 examples/nbody/prepare/nbody_data.py | 5 +----
 examples/nbody/train.py | 3 +--
 examples/pointcloud/classification/train.py | 1 -
 examples/pointcloud/common/networks.py | 3 ---
 examples/pointcloud/part_segmentation/train.py | 1 -
 34 files changed, 22 insertions(+), 84 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 97970af..90e3ab6 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -24,15 +24,15 @@ repos:
 #    - id: pyupgrade
 #      args: ['--py37-plus']
 
-## If you want to avoid flake8 errors due to unused vars or imports:
-# - repo: https://github.com/PyCQA/autoflake
-#   rev: v2.1.1
-#   hooks:
-#     - id: autoflake
-#       args: [
-#         --in-place,
-#         --remove-all-unused-imports,
-#       ]
+# If you want to avoid flake8 errors due to unused vars or imports:
+- repo: https://github.com/PyCQA/autoflake
+  rev: v2.1.1
+  hooks:
+    - id: autoflake
+      args: [
+        --in-place,
+        --remove-all-unused-imports,
+      ]
 
 # - repo: https://github.com/PyCQA/isort
 #   rev: 5.13.2
diff --git a/equiadapt/common/basecanonicalization.py b/equiadapt/common/basecanonicalization.py
index 3935bf6..3873733 100644
--- a/equiadapt/common/basecanonicalization.py
+++ b/equiadapt/common/basecanonicalization.py
@@ -1,4 +1,3 @@
-from abc import ABC, abstractmethod
 from typing import Any, Dict, List, Tuple, Union
 
 import torch
diff --git a/equiadapt/images/canonicalization/discrete_group.py b/equiadapt/images/canonicalization/discrete_group.py
index 2133c2b..61411ec 100644
--- a/equiadapt/images/canonicalization/discrete_group.py
+++ b/equiadapt/images/canonicalization/discrete_group.py
@@ -1,5 +1,5 @@
 import math
-from typing import Any, List, Tuple, Union
+from typing import List, Tuple, Union
 
 import kornia as K
 import torch
diff --git a/equiadapt/images/canonicalization_networks/custom_equivariant_networks.py b/equiadapt/images/canonicalization_networks/custom_equivariant_networks.py
index a3d0cd0..17609e9 100644
--- a/equiadapt/images/canonicalization_networks/custom_equivariant_networks.py
+++ b/equiadapt/images/canonicalization_networks/custom_equivariant_networks.py
@@ -1,6 +1,4 @@
-import math
 
-import kornia as K
 import torch
 import torch.nn as nn
 from .custom_group_equivariant_layers import RotationEquivariantConvLift, RotationEquivariantConv, RotoReflectionEquivariantConvLift, RotoReflectionEquivariantConv
diff --git a/equiadapt/nbody/canonicalization/continuous_group.py b/equiadapt/nbody/canonicalization/continuous_group.py
index ff2be44..2f2f651 100644
--- a/equiadapt/nbody/canonicalization/continuous_group.py
+++ b/equiadapt/nbody/canonicalization/continuous_group.py
@@ -1,12 +1,5 @@
 import torch
-import kornia as K
 from equiadapt.common.basecanonicalization import ContinuousGroupCanonicalization
-from equiadapt.common.utils import gram_schmidt
-from torch.nn.modules import Module
-from torchvision import transforms
-import math
-from torch.nn import functional as F
-from equiadapt.common.utils import gram_schmidt
 
 class ContinuousGroupNBody(ContinuousGroupCanonicalization):
     def __init__(self,
diff --git a/equiadapt/nbody/canonicalization_networks/euclideangraph_base_models.py b/equiadapt/nbody/canonicalization_networks/euclideangraph_base_models.py
index 5aeaffd..b979cf0 100644
--- a/equiadapt/nbody/canonicalization_networks/euclideangraph_base_models.py
+++ b/equiadapt/nbody/canonicalization_networks/euclideangraph_base_models.py
@@ -1,16 +1,11 @@
-import numpy as np
 import torch
 import torch.nn as nn
-import torch.nn.functional as F
-from torch.autograd import Variable
 import pytorch_lightning as pl
-from pytorch3d.transforms import RotateAxisAngle, Rotate, random_rotations
-import torchmetrics.functional as tmf
 import wandb
 import torch_scatter as ts
 import math
 from equiadapt.nbody.canonicalization_networks.gcl import E_GCL_vel, GCL
-from equiadapt.nbody.canonicalization_networks.vn_layers import VNLinearLeakyReLU, VNLinear, VNLeakyReLU, VNSoftplus
+from equiadapt.nbody.canonicalization_networks.vn_layers import VNLeakyReLU, VNSoftplus
 from equiadapt.nbody.canonicalization_networks.set_base_models import SequentialMultiple
 
 
diff --git a/equiadapt/nbody/canonicalization_networks/euclideangraph_model.py b/equiadapt/nbody/canonicalization_networks/euclideangraph_model.py
index bbe3356..d70b6b3 100644
--- a/equiadapt/nbody/canonicalization_networks/euclideangraph_model.py
+++ b/equiadapt/nbody/canonicalization_networks/euclideangraph_model.py
@@ -1,14 +1,8 @@
 import torch
-import torch.nn as nn
-import torch.nn.functional as F
 import pytorch_lightning as pl
-from pytorch3d.transforms import RotateAxisAngle, Rotate, random_rotations
-import torchmetrics.functional as tmf
-import wandb
-from equiadapt.nbody.canonicalization_networks.vn_layers import *
 from equiadapt.nbody.canonicalization_networks.euclideangraph_base_models import EGNN_vel, GNN, VNDeepSets, BaseEuclideangraphModel, Transformer
-from canonical_network.utils import define_hyperparams, dict_to_object
+from canonical_network.utils import define_hyperparams
 
 
 # Input dim is 6 because location and velocity vectors are concatenated.
 NBODY_HYPERPARAMS = {
diff --git a/equiadapt/nbody/canonicalization_networks/gcl.py b/equiadapt/nbody/canonicalization_networks/gcl.py
index 5a319b6..e8e3b57 100644
--- a/equiadapt/nbody/canonicalization_networks/gcl.py
+++ b/equiadapt/nbody/canonicalization_networks/gcl.py
@@ -1,7 +1,5 @@
 from torch import nn
 import torch
-import torch.nn.functional as F
-import torch_scatter as ts
 
 
 class MLP(nn.Module):
diff --git a/equiadapt/nbody/canonicalization_networks/image_model.py b/equiadapt/nbody/canonicalization_networks/image_model.py
index 201ecc3..d798b26 100644
--- a/equiadapt/nbody/canonicalization_networks/image_model.py
+++ b/equiadapt/nbody/canonicalization_networks/image_model.py
@@ -1,7 +1,7 @@
 from torch import optim, nn
 import pytorch_lightning as pl
 import torch
-from torch.optim.lr_scheduler import OneCycleLR, MultiStepLR
+from torch.optim.lr_scheduler import MultiStepLR
 from canonical_network.models.image_networks import VanillaNetwork, EquivariantCanonizationNetwork, \
     BasicConvEncoder, Identity, PCACanonizationNetwork, RotationEquivariantConvEncoder, OptimizationCanonizationNetwork
 from canonical_network.models.resnet import resnet44
diff --git a/equiadapt/nbody/canonicalization_networks/image_networks.py b/equiadapt/nbody/canonicalization_networks/image_networks.py
index 33916a6..20e35bc 100644
--- a/equiadapt/nbody/canonicalization_networks/image_networks.py
+++ b/equiadapt/nbody/canonicalization_networks/image_networks.py
@@ -4,7 +4,6 @@ import torch
 
 from canonical_network.models.equivariant_layers import RotationEquivariantConvLift, \
     RotoReflectionEquivariantConvLift, RotationEquivariantConv, RotoReflectionEquivariantConv
-from torchvision import transforms
 from canonical_network.models.set_base_models import SequentialMultiple
 import numpy as np
 
diff --git a/equiadapt/nbody/canonicalization_networks/resnet.py b/equiadapt/nbody/canonicalization_networks/resnet.py
index 0a67258..2987156 100644
--- a/equiadapt/nbody/canonicalization_networks/resnet.py
+++ b/equiadapt/nbody/canonicalization_networks/resnet.py
@@ -27,12 +27,10 @@ If you use this implementation in you work, please don't forget to mention the
 author, Yerlan Idelbayev.
 
 '''
-import torch
 import torch.nn as nn
 import torch.nn.functional as F
 import torch.nn.init as init
 
-from torch.autograd import Variable
 
 __all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']
 
diff --git a/equiadapt/nbody/canonicalization_networks/set_base_models.py b/equiadapt/nbody/canonicalization_networks/set_base_models.py
index 29c5ae4..9a78cdb 100644
--- a/equiadapt/nbody/canonicalization_networks/set_base_models.py
+++ b/equiadapt/nbody/canonicalization_networks/set_base_models.py
@@ -1,4 +1,3 @@
-from sched import scheduler
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
diff --git a/equiadapt/nbody/canonicalization_networks/set_model.py b/equiadapt/nbody/canonicalization_networks/set_model.py
index 26a7b89..c1184f7 100644
--- a/equiadapt/nbody/canonicalization_networks/set_model.py
+++ b/equiadapt/nbody/canonicalization_networks/set_model.py
@@ -1,13 +1,10 @@
-from collections import namedtuple
-from turtle import forward
 
 import numpy as np
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
 import torch_scatter as ts
 import pytorch_lightning as pl
-import torchmetrics.functional as tmf
 from einops import rearrange
 import wandb
 
diff --git a/equiadapt/nbody/canonicalization_networks/vn_layers.py b/equiadapt/nbody/canonicalization_networks/vn_layers.py
index 9372e54..b1b0c57 100644
--- a/equiadapt/nbody/canonicalization_networks/vn_layers.py
+++ b/equiadapt/nbody/canonicalization_networks/vn_layers.py
@@ -1,11 +1,5 @@
-import os
-import sys
-import copy
-import math
-import numpy as np
 import torch
 import torch.nn as nn
-import torch.nn.functional as F
 
 EPS = 1e-6
 
diff --git a/equiadapt/pointcloud/canonicalization/continuous_group.py b/equiadapt/pointcloud/canonicalization/continuous_group.py
index b64da6d..bbad5e7 100644
--- a/equiadapt/pointcloud/canonicalization/continuous_group.py
+++ b/equiadapt/pointcloud/canonicalization/continuous_group.py
@@ -2,7 +2,6 @@
 # This is meant to be a proof of concept and we are happy to receive contribution to extend this to other group actions.
 
 import torch
-import kornia as K
 from equiadapt.common.basecanonicalization import ContinuousGroupCanonicalization
 from equiadapt.common.utils import gram_schmidt
 from typing import Any, List, Tuple, Union
diff --git a/equiadapt/pointcloud/canonicalization_networks/equivariant_networks.py b/equiadapt/pointcloud/canonicalization_networks/equivariant_networks.py
index 494656b..f0bf582 100644
--- a/equiadapt/pointcloud/canonicalization_networks/equivariant_networks.py
+++ b/equiadapt/pointcloud/canonicalization_networks/equivariant_networks.py
@@ -1,6 +1,6 @@
 import torch
 import torch.nn as nn
-from equiadapt.pointcloud.canonicalization_networks.vector_neuron_layers import VNLinearLeakyReLU, VNLinear, VNMaxPool, VNBatchNorm
+from equiadapt.pointcloud.canonicalization_networks.vector_neuron_layers import VNLinearLeakyReLU, VNMaxPool, VNBatchNorm
 
 def knn(x, k):
     inner = -2 * torch.matmul(x.transpose(2, 1), x)
diff --git a/examples/images/classification/inference_utils.py b/examples/images/classification/inference_utils.py
index 27b2020..9218602 100644
--- a/examples/images/classification/inference_utils.py
+++ b/examples/images/classification/inference_utils.py
@@ -1,11 +1,9 @@
 import math
-from typing import Dict, Union
 
 import torch
 from omegaconf import DictConfig
 from torchvision import transforms
 
-import wandb
 
 
 def get_inference_method(canonicalizer: torch.nn.Module,
diff --git a/examples/images/classification/prepare/__init__.py b/examples/images/classification/prepare/__init__.py
index 1d6d4f8..e69de29 100644
--- a/examples/images/classification/prepare/__init__.py
+++ b/examples/images/classification/prepare/__init__.py
@@ -1,4 +0,0 @@
-from .cifar_data import CIFAR10DataModule, CIFAR100DataModule
-from .imagenet_data import ImageNetDataModule
-from .rotated_mnist_data import RotatedMNISTDataModule
-from .stl10_data import STL10DataModule
diff --git a/examples/images/classification/prepare/cifar_data.py b/examples/images/classification/prepare/cifar_data.py
index 9f42cc1..4370e0c 100644
--- a/examples/images/classification/prepare/cifar_data.py
+++ b/examples/images/classification/prepare/cifar_data.py
@@ -3,7 +3,7 @@
 import random
 
 import pytorch_lightning as pl
-from torch.utils.data import DataLoader, random_split
+from torch.utils.data import DataLoader
 from torchvision import transforms
 from torchvision.datasets import CIFAR10, CIFAR100
 
diff --git a/examples/images/classification/prepare/imagenet_data.py b/examples/images/classification/prepare/imagenet_data.py
index 33caf62..d01357a 100644
--- a/examples/images/classification/prepare/imagenet_data.py
+++ b/examples/images/classification/prepare/imagenet_data.py
@@ -1,13 +1,11 @@
-import os
 import random
-from typing import List
 
 import pytorch_lightning as pl
 import torch
 import torchvision
 import torchvision.transforms as transforms
 
-from PIL import Image, ImageOps
+from PIL import ImageOps
 from torch import nn
 
 DEFAULT_CROP_RATIO = 224/256
diff --git a/examples/images/classification/prepare/rotated_mnist_data.py b/examples/images/classification/prepare/rotated_mnist_data.py
index 4134ffe..8d8747a 100644
--- a/examples/images/classification/prepare/rotated_mnist_data.py
+++ b/examples/images/classification/prepare/rotated_mnist_data.py
@@ -1,4 +1,3 @@
-import argparse
 import os
 import urllib.request as url_req
 import zipfile
diff --git a/examples/images/classification/prepare/stl10_data.py b/examples/images/classification/prepare/stl10_data.py
index 7541657..ccfd711 100644
--- a/examples/images/classification/prepare/stl10_data.py
+++ b/examples/images/classification/prepare/stl10_data.py
@@ -3,7 +3,7 @@
 import random
 
 import pytorch_lightning as pl
-from torch.utils.data import DataLoader, random_split
+from torch.utils.data import DataLoader
 from torchvision import transforms
 from torchvision.datasets import STL10
 
diff --git a/examples/images/classification/train_utils.py b/examples/images/classification/train_utils.py
index 75a2b8e..91407f7 100644
--- a/examples/images/classification/train_utils.py
+++ b/examples/images/classification/train_utils.py
@@ -1,4 +1,4 @@
-from typing import Dict, Optional
+from typing import Optional
 
 import dotenv
 import pytorch_lightning as pl
diff --git a/examples/images/common/__init__.py b/examples/images/common/__init__.py
index e0b0945..e69de29 100644
--- a/examples/images/common/__init__.py
+++ b/examples/images/common/__init__.py
@@ -1 +0,0 @@
-from .utils import get_canonicalization_network, get_canonicalizer
diff --git a/examples/images/segmentation/prepare/__init__.py b/examples/images/segmentation/prepare/__init__.py
index ab3e630..e69de29 100644
--- a/examples/images/segmentation/prepare/__init__.py
+++ b/examples/images/segmentation/prepare/__init__.py
@@ -1 +0,0 @@
-from .coco_data import COCODataModule
diff --git a/examples/images/segmentation/prepare/vision_transforms.py b/examples/images/segmentation/prepare/vision_transforms.py
index 1949df9..e1ce092 100644
--- a/examples/images/segmentation/prepare/vision_transforms.py
+++ b/examples/images/segmentation/prepare/vision_transforms.py
@@ -2,7 +2,6 @@
 
 import torch
 from torch import Tensor, nn
-from torchvision import ops
 from torchvision.transforms import functional as F
 from torchvision.transforms import transforms as T
 
diff --git a/examples/images/segmentation/train_utils.py b/examples/images/segmentation/train_utils.py
index 3e26dcc..569293c 100644
--- a/examples/images/segmentation/train_utils.py
+++ b/examples/images/segmentation/train_utils.py
@@ -1,4 +1,4 @@
-from typing import Dict, Optional
+from typing import Optional
 
 import dotenv
 import pytorch_lightning as pl
diff --git a/examples/nbody/model.py b/examples/nbody/model.py
index 1ea74f9..721ecbd 100644
--- a/examples/nbody/model.py
+++ b/examples/nbody/model.py
@@ -1,11 +1,9 @@
 import torch
 import pytorch_lightning as pl
-from torch.optim.lr_scheduler import MultiStepLR
 from examples.nbody.model_utils import get_canonicalization_network, get_prediction_network, get_edges
 import torch.nn as nn
-from omegaconf import OmegaConf, DictConfig
+from omegaconf import DictConfig
 from equiadapt.nbody.canonicalization.continuous_group import ContinuousGroupNBody
-import wandb
 
 class NBodyPipeline(pl.LightningModule):
     def __init__(self, hyperparams: DictConfig):
diff --git a/examples/nbody/model_utils.py b/examples/nbody/model_utils.py
index b08b9b0..3a8b9b8 100644
--- a/examples/nbody/model_utils.py
+++ b/examples/nbody/model_utils.py
@@ -1,6 +1,4 @@
 from equiadapt.nbody.canonicalization_networks.euclideangraph_base_models import EGNN_vel, VNDeepSets, GNN, Transformer
-from collections import namedtuple
-from omegaconf import OmegaConf
 import torch
 
 
diff --git a/examples/nbody/prepare/nbody_data.py b/examples/nbody/prepare/nbody_data.py
index d339d60..7bd936c 100644
--- a/examples/nbody/prepare/nbody_data.py
+++ b/examples/nbody/prepare/nbody_data.py
@@ -1,11 +1,8 @@
 import numpy as np
 import torch
-from torch.utils.data import DataLoader, Dataset
+from torch.utils.data import DataLoader
 import pytorch_lightning as pl
-import os
-import json
 import numpy as np
-from torch.utils.data import Dataset
 import pathlib
 
 SRC_PATH = pathlib.Path(__file__).parent.parent
diff --git a/examples/nbody/train.py b/examples/nbody/train.py
index fe97320..eb29bf9 100644
--- a/examples/nbody/train.py
+++ b/examples/nbody/train.py
@@ -4,9 +4,8 @@
 import pytorch_lightning as pl
 import wandb
 import os
-from omegaconf import DictConfig, OmegaConf
+from omegaconf import OmegaConf
 from examples.nbody.model import NBodyPipeline
-import torch
 from examples.nbody.prepare.nbody_data import NBodyDataModule
 
 
diff --git a/examples/pointcloud/classification/train.py b/examples/pointcloud/classification/train.py
index aad8e8e..c2cdd5e 100644
--- a/examples/pointcloud/classification/train.py
+++ b/examples/pointcloud/classification/train.py
@@ -7,7 +7,6 @@
 from omegaconf import DictConfig, OmegaConf
 import pytorch_lightning as pl
 from pytorch_lightning.loggers import WandbLogger
-from argparse import ArgumentParser
 
 from prepare import ModelNetDataModule
 from train_utils import get_model_pipeline, get_callbacks, get_checkpoint_name, get_trainer, load_envs
diff --git a/examples/pointcloud/common/networks.py b/examples/pointcloud/common/networks.py
index 7345379..7766579 100644
--- a/examples/pointcloud/common/networks.py
+++ b/examples/pointcloud/common/networks.py
@@ -1,10 +1,7 @@
 from omegaconf import DictConfig
 import torch
-import numpy as np
 import torch.nn as nn
 import torch.nn.functional as F
-import pytorch_lightning as pl
-from torch.autograd import Variable
 import torch.nn.init as init
 
 def knn(x, k):
diff --git a/examples/pointcloud/part_segmentation/train.py b/examples/pointcloud/part_segmentation/train.py
index 985599b..3eb24c4 100644
--- a/examples/pointcloud/part_segmentation/train.py
+++ b/examples/pointcloud/part_segmentation/train.py
@@ -7,7 +7,6 @@
 from omegaconf import DictConfig, OmegaConf
 import pytorch_lightning as pl
 from pytorch_lightning.loggers import WandbLogger
-from argparse import ArgumentParser
 
 from prepare import ShapeNetDataModule
 from train_utils import get_model_pipeline, get_callbacks, get_checkpoint_name, get_trainer, load_envs
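
With the autoflake hook now active in .pre-commit-config.yaml, the same cleanup can be reproduced locally. A minimal sketch, assuming pre-commit and the pinned autoflake (v2.1.1) are installed:

    # run only the autoflake hook across the whole repository
    pre-commit run autoflake --all-files

    # or call autoflake directly with the same arguments as the hook,
    # e.g. on one of the files touched by this patch
    autoflake --in-place --remove-all-unused-imports equiadapt/common/basecanonicalization.py

The direct invocation mirrors the hook's args entry; under pre-commit the staged file paths are appended to the command automatically.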