
Commit 687144c

- bump lightning dev sha
- use stable PyTorch channel for 2.3.0 CI
- remove use of removed upstream type alias

speediedan committed May 6, 2024 (parent: c97610b)
Showing 13 changed files with 30 additions and 16 deletions.
9 changes: 9 additions & 0 deletions CHANGELOG.md
@@ -6,6 +6,15 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).

## [2.3.0] - 2024-XX-XX

+### Added
+
+- Support for Lightning and PyTorch ``2.3.0``
+
+## [2.2.4] - 2024-05-04
+
+### Added
+
+- Support for Lightning ``2.2.4`` and PyTorch ``2.2.2``

## [2.2.1] - 2024-03-04

3 changes: 3 additions & 0 deletions CITATION.cff
@@ -110,6 +110,9 @@ identifiers:
- description: "Fine-Tuning Scheduler (v2.2.1)"
type: doi
value: 10.5281/zenodo.10780386
- description: "Fine-Tuning Scheduler (v2.2.4)"
type: doi
value: 10.5281/zenodo.11114374
license: "Apache-2.0"
url: "https://finetuning-scheduler.readthedocs.io/"
repository-code: "https://github.com/speediedan/finetuning-scheduler"
4 changes: 2 additions & 2 deletions dockers/base-cuda/Dockerfile
@@ -85,11 +85,11 @@ RUN \
else \
# or target a specific cuda build, by specifying a particular index url w/...
# ... default channel
-# pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu121; \
+pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu121; \
# ... pytorch patch version
# pip install torch==1.11.1+cu113 torchvision==0.11.3+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html; \
# ... pytorch nightly dev version
-pip install --pre torch==2.3.0.dev20240209 torchvision==0.18.0.dev20240209 -f https://download.pytorch.org/whl/nightly/cu121/torch_nightly.html; \
+#pip install --pre torch==2.3.0.dev20240209 torchvision==0.18.0.dev20240209 -f https://download.pytorch.org/whl/nightly/cu121/torch_nightly.html; \
# ... test channel
# pip install --pre torch torchvision -f https://download.pytorch.org/whl/test/cu121/torch_test.html; \
fi && \
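The hunk above switches the CI image for the 2.3.0 cycle from the PyTorch nightly channel to the stable cu121 wheels. As a rough sanity check (a sketch, not part of the repository), something like the following could be run inside the built image to confirm a stable rather than a `.dev` nightly build was installed:

```python
# Sketch: verify inside the built image that stable cu121 wheels were installed.
import torch
import torchvision

print(torch.__version__, torchvision.__version__, torch.version.cuda)
assert "dev" not in torch.__version__, "nightly build detected; expected the stable channel"
```
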
2 changes: 1 addition & 1 deletion dockers/docker_images_release.sh
@@ -56,7 +56,7 @@ maybe_build(){

build_eval(){
# latest PyTorch image supported by release
-declare -A iv=(["cuda"]="12.1.0" ["python"]="3.11" ["pytorch"]="2.2.1" ["lightning"]="2.2" ["cust_build"]="0")
+declare -A iv=(["cuda"]="12.1.0" ["python"]="3.11" ["pytorch"]="2.2.2" ["lightning"]="2.2" ["cust_build"]="0")
export latest_pt="base-cu${iv["cuda"]}-py${iv["python"]}-pt${iv["pytorch"]}-pl${iv["lightning"]}"
export latest_azpl="py${iv["python"]}-pt${iv["pytorch"]}-pl${iv["lightning"]}-azpl-init"
maybe_build iv "${latest_pt}" "${latest_azpl}"
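For reference, the bumped version map expands to the following image tags (a small illustration of the interpolation performed by `build_eval()` above, written here in Python rather than bash):

```python
# Illustration only: the tags composed from the updated version map.
iv = {"cuda": "12.1.0", "python": "3.11", "pytorch": "2.2.2", "lightning": "2.2"}
latest_pt = f"base-cu{iv['cuda']}-py{iv['python']}-pt{iv['pytorch']}-pl{iv['lightning']}"
latest_azpl = f"py{iv['python']}-pt{iv['pytorch']}-pl{iv['lightning']}-azpl-init"
print(latest_pt)    # base-cu12.1.0-py3.11-pt2.2.2-pl2.2
print(latest_azpl)  # py3.11-pt2.2.2-pl2.2-azpl-init
```
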
2 changes: 1 addition & 1 deletion dockers/release-conda/Dockerfile
@@ -51,7 +51,7 @@ ENV \
# LD_LIBRARY_PATH="/root/miniconda3/lib:$LD_LIBRARY_PATH" \
CUDA_TOOLKIT_ROOT_DIR="/usr/local/cuda" \
MAKEFLAGS="-j2" \
-TORCH_CUDA_ARCH_LIST="6.0;7.0;7.5;8.0;8.6" \
+TORCH_CUDA_ARCH_LIST="6.0;7.0;7.5;8.0;8.6;9.0" \
CONDA_ENV=finetuning-scheduler \
CONDA_DEFAULT_ENV=${CONDA_ENV}

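The change above adds compute capability 9.0 (Hopper) to the architectures the conda release image compiles for. A quick, hedged way to inspect which architectures a given torch build actually targets:

```python
# Sketch: list the compute capabilities the installed torch build was compiled for
# (these correspond to TORCH_CUDA_ARCH_LIST entries, e.g. sm_90 for 9.0).
import torch

if torch.cuda.is_available():
    print(torch.cuda.get_arch_list())           # e.g. ['sm_60', 'sm_70', 'sm_75', 'sm_80', 'sm_86', 'sm_90']
    print(torch.cuda.get_device_capability(0))  # capability of the local GPU, e.g. (8, 6)
```
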
2 changes: 1 addition & 1 deletion requirements/base.txt
@@ -1,5 +1,5 @@
#lightning>=2.3.0,<2.3.1
# the below is uncommented when master is targeting a specific pl dev master commit
-git+https://github.com/Lightning-AI/lightning.git@a89ea11799c2801b386b6295742abcde0dc4ed08#egg=lightning
+git+https://github.com/Lightning-AI/lightning.git@0f12271d7feeacb6fbe5d70d2ce057da4a04d8b4#egg=lightning
torch>=2.0.0
mpmath<1.4.0 # temporary requirement to avoid installation of alpha version of mpmath
2 changes: 1 addition & 1 deletion requirements/cli.txt
@@ -1,3 +1,3 @@
-jsonargparse[signatures]>=4.26.1
+jsonargparse[signatures]>=4.27.7
omegaconf>=2.1.0
hydra-core>=1.1.0
6 changes: 4 additions & 2 deletions requirements/pl_adjust_versions.py
@@ -5,8 +5,10 @@

# IMPORTANT: this list needs to be sorted in reverse
VERSIONS = [
dict(torch="2.3.0", torchvision="0.18.0"), # nightly
dict(torch="2.2.1", torchvision="0.17.1"), # stable
dict(torch="2.4.0", torchvision="0.19.0"), # nightly
dict(torch="2.3.0", torchvision="0.18.0"), # stable
dict(torch="2.2.2", torchvision="0.17.2"),
dict(torch="2.2.1", torchvision="0.17.1"),
dict(torch="2.2.0", torchvision="0.17.0"),
dict(torch="2.1.2", torchvision="0.16.2"),
dict(torch="2.1.1", torchvision="0.16.1"),
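The `VERSIONS` table pairs each torch release with its matching torchvision release and, per the comment in the script, must stay sorted newest-first. A minimal sketch of how such a reverse-sorted table could be consumed (the helper name below is illustrative, not the script's actual interface):

```python
# Sketch: pick the torchvision pin that matches an installed torch version
# from a newest-first table like the one above.
from typing import Optional

VERSIONS = [
    dict(torch="2.4.0", torchvision="0.19.0"),  # nightly
    dict(torch="2.3.0", torchvision="0.18.0"),  # stable
    dict(torch="2.2.2", torchvision="0.17.2"),
]


def matching_torchvision(torch_version: str) -> Optional[str]:
    base = torch_version.split("+")[0]  # drop local build suffixes such as "+cu121"
    for row in VERSIONS:
        if base.startswith(row["torch"]):
            return row["torchvision"]
    return None


print(matching_torchvision("2.3.0+cu121"))  # 0.18.0
```
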
2 changes: 1 addition & 1 deletion requirements/standalone_base.txt
@@ -1,5 +1,5 @@
#pytorch-lightning>=2.3.0,<2.3.1
# the below is uncommented when master is targeting a specific pl dev master commit
-git+https://github.com/Lightning-AI/pytorch-lightning.git@a89ea11799c2801b386b6295742abcde0dc4ed08#egg=pytorch-lightning
+git+https://github.com/Lightning-AI/pytorch-lightning.git@0f12271d7feeacb6fbe5d70d2ce057da4a04d8b4#egg=pytorch-lightning
torch>=2.0.0
mpmath<1.4.0 # temporary requirement to avoid installation of alpha version of mpmath
2 changes: 1 addition & 1 deletion setup.py
@@ -138,7 +138,7 @@ def _setup_args(standalone: bool = False) -> Dict[str, Any]:
_INSTALL_PATHS["require"],
file_name=base_reqs,
standalone=standalone,
pl_commit="a89ea11799c2801b386b6295742abcde0dc4ed08",
pl_commit="0f12271d7feeacb6fbe5d70d2ce057da4a04d8b4",
)
base_setup["install_requires"] = install_requires
return base_setup
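The `pl_commit` value flows into the requirements handling so that, while master targets a Lightning dev commit, the pinned `git+` requirement lines (as in `requirements/base.txt` and `requirements/standalone_base.txt` above) reference the same SHA. Below is a hypothetical helper, not `setup.py`'s actual function, sketching how such a requirement string is composed:

```python
# Illustration only (hypothetical helper): compose a commit-pinned Lightning requirement
# like those in requirements/base.txt and requirements/standalone_base.txt.
def pinned_lightning_requirement(pl_commit: str, standalone: bool = False) -> str:
    pkg = "pytorch-lightning" if standalone else "lightning"
    return f"git+https://github.com/Lightning-AI/{pkg}.git@{pl_commit}#egg={pkg}"


print(pinned_lightning_requirement("0f12271d7feeacb6fbe5d70d2ce057da4a04d8b4"))
```
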
2 changes: 1 addition & 1 deletion src/finetuning_scheduler/fts_supporters.py
@@ -979,7 +979,7 @@ def reinit_optimizer(self, new_optimizer: Dict, trainer: pl.Trainer, init_params
group["initial_lr"] = group.get("initial_lr", group["lr"])
trainer.strategy.optimizers = [new_optimizer_handle] # type: ignore[list-item]
if trainer.lr_scheduler_configs:
-trainer.lr_scheduler_configs[0].scheduler.optimizer = new_optimizer_handle
+trainer.lr_scheduler_configs[0].scheduler.optimizer = new_optimizer_handle # type: ignore[assignment]
self._maybe_trace_reinit("optimizer", prev_optim_repr, repr(trainer.strategy.optimizers[0]))
return new_optimizer_handle # type:ignore[return-value]

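The changed line points any existing LR scheduler config at the freshly created optimizer; the added `# type: ignore[assignment]` presumably just quiets the type checker after the upstream type changes. The rewiring itself matters because a torch scheduler keeps a reference to the optimizer it was constructed with, so after an optimizer swap its `.optimizer` attribute must be updated for later `step()` calls to drive the new optimizer. A standalone sketch in plain PyTorch (no Lightning objects involved):

```python
# Sketch of the rewiring performed above: point an existing scheduler at a new optimizer.
import torch
from torch.optim.lr_scheduler import StepLR

params = [torch.nn.Parameter(torch.zeros(1))]
old_optimizer = torch.optim.SGD(params, lr=0.1)
scheduler = StepLR(old_optimizer, step_size=1)

new_optimizer = torch.optim.AdamW(params, lr=0.01)
for group in new_optimizer.param_groups:
    group["initial_lr"] = group.get("initial_lr", group["lr"])  # mirrors reinit_optimizer above
scheduler.optimizer = new_optimizer  # subsequent scheduler.step() calls now adjust the new optimizer
```
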
5 changes: 3 additions & 2 deletions src/finetuning_scheduler/types.py
@@ -20,7 +20,8 @@
from typing_extensions import TypeAlias

import torch
-from lightning.fabric.utilities.types import _TORCH_LRSCHEDULER, Optimizable, ReduceLROnPlateau
+from torch.optim.lr_scheduler import LRScheduler, ReduceLROnPlateau
+from lightning.fabric.utilities.types import Optimizable
from lightning.pytorch.callbacks import EarlyStopping, ModelCheckpoint


@@ -46,6 +47,6 @@ def add_param_group(self, param_group: Dict[Any, Any]) -> None:
"LinearLR",
]
FTSLRSchedulerTypeTuple = tuple(getattr(torch.optim.lr_scheduler, lr_class) for lr_class in supported_lrs)
-FTSLRSchedulerType = Union[Type[_TORCH_LRSCHEDULER], Type[ReduceLROnPlateau]]
+FTSLRSchedulerType = Union[Type[LRScheduler], Type[ReduceLROnPlateau]]

BaseCallbackDepType: TypeAlias = Union[Type[EarlyStopping], Type[ModelCheckpoint]]
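
With the `_TORCH_LRSCHEDULER` alias gone from `lightning.fabric.utilities.types`, the module now takes `LRScheduler` and `ReduceLROnPlateau` straight from `torch.optim.lr_scheduler`. A small hedged sketch of how the resulting aliases behave, assuming a torch >= 2.0 environment:

```python
# Sketch: the public torch classes stand in for the removed upstream alias.
import torch
from torch.optim.lr_scheduler import LRScheduler, ReduceLROnPlateau, StepLR

opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)

# StepLR (like the other entries in ``supported_lrs``) is an LRScheduler subclass ...
assert isinstance(StepLR(opt, step_size=1), LRScheduler)
# ... while ReduceLROnPlateau has historically been a separate class, which is why
# FTSLRSchedulerType stays a Union of the two types.
assert isinstance(ReduceLROnPlateau(opt), ReduceLROnPlateau)
```
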
5 changes: 2 additions & 3 deletions tests/helpers/boring_model.py
@@ -16,13 +16,12 @@
from warnings import WarningMessage

import torch
-from lightning.fabric.utilities.types import _TORCH_LRSCHEDULER
from lightning.pytorch import LightningDataModule, LightningModule
from lightning.pytorch.core.optimizer import LightningOptimizer
from lightning.pytorch.utilities.types import STEP_OUTPUT
from torch import Tensor
from torch.optim import Optimizer
-from torch.optim.lr_scheduler import LambdaLR
+from torch.optim.lr_scheduler import LambdaLR, LRScheduler
from torch.utils.data import DataLoader, Dataset, IterableDataset, Subset


@@ -183,7 +182,7 @@ def test_step(self, batch: Tensor, batch_idx: int) -> Optional[STEP_OUTPUT]:
# def test_epoch_end(self, outputs) -> None:
# torch.stack([x["y"] for x in outputs]).mean()

-def configure_optimizers(self) -> Tuple[List[torch.optim.Optimizer], List[_TORCH_LRSCHEDULER]]:
+def configure_optimizers(self) -> Tuple[List[torch.optim.Optimizer], List[LRScheduler]]:
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
