diff --git a/CHANGELOG.md b/CHANGELOG.md
index b6503ee..381b5e7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,15 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ## [2.3.0] - 2024-XX-XX
 
+### Added
+
+- Support for Lightning and PyTorch ``2.3.0``
+
+## [2.2.4] - 2024-05-04
+
+### Added
+
+- Support for Lightning ``2.2.4`` and PyTorch ``2.2.2``
 
 ## [2.2.1] - 2024-03-04
 
diff --git a/CITATION.cff b/CITATION.cff
index 2ce78df..a20dacb 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -110,6 +110,9 @@ identifiers:
   - description: "Fine-Tuning Scheduler (v2.2.1)"
     type: doi
     value: 10.5281/zenodo.10780386
+  - description: "Fine-Tuning Scheduler (v2.2.4)"
+    type: doi
+    value: 10.5281/zenodo.11114374
 license: "Apache-2.0"
 url: "https://finetuning-scheduler.readthedocs.io/"
 repository-code: "https://github.com/speediedan/finetuning-scheduler"
diff --git a/dockers/base-cuda/Dockerfile b/dockers/base-cuda/Dockerfile
index 49b2ab5..21f6408 100644
--- a/dockers/base-cuda/Dockerfile
+++ b/dockers/base-cuda/Dockerfile
@@ -85,11 +85,11 @@ RUN \
     else \
         # or target a specific cuda build, by specifying a particular index url w/...
         # ... default channel
-        # pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu121; \
+        pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu121; \
         # ... pytorch patch version
         # pip install torch==1.11.1+cu113 torchvision==0.11.3+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html; \
         # ... pytorch nightly dev version
-        pip install --pre torch==2.3.0.dev20240209  torchvision==0.18.0.dev20240209 -f https://download.pytorch.org/whl/nightly/cu121/torch_nightly.html; \
+        # pip install --pre torch==2.3.0.dev20240209 torchvision==0.18.0.dev20240209 -f https://download.pytorch.org/whl/nightly/cu121/torch_nightly.html; \
         # ... test channel
         # pip install --pre torch torchvision -f https://download.pytorch.org/whl/test/cu121/torch_test.html; \
     fi && \
diff --git a/dockers/docker_images_release.sh b/dockers/docker_images_release.sh
index e28aa14..98dad9a 100755
--- a/dockers/docker_images_release.sh
+++ b/dockers/docker_images_release.sh
@@ -56,7 +56,7 @@ maybe_build(){
 
 build_eval(){
 	# latest PyTorch image supported by release
-	declare -A iv=(["cuda"]="12.1.0" ["python"]="3.11" ["pytorch"]="2.2.1" ["lightning"]="2.2" ["cust_build"]="0")
+	declare -A iv=(["cuda"]="12.1.0" ["python"]="3.11" ["pytorch"]="2.2.2" ["lightning"]="2.2" ["cust_build"]="0")
 	export latest_pt="base-cu${iv["cuda"]}-py${iv["python"]}-pt${iv["pytorch"]}-pl${iv["lightning"]}"
 	export latest_azpl="py${iv["python"]}-pt${iv["pytorch"]}-pl${iv["lightning"]}-azpl-init"
 	maybe_build iv "${latest_pt}" "${latest_azpl}"
diff --git a/dockers/release-conda/Dockerfile b/dockers/release-conda/Dockerfile
index 1705d1d..12fcab5 100644
--- a/dockers/release-conda/Dockerfile
+++ b/dockers/release-conda/Dockerfile
@@ -51,7 +51,7 @@ ENV \
     # LD_LIBRARY_PATH="/root/miniconda3/lib:$LD_LIBRARY_PATH" \
     CUDA_TOOLKIT_ROOT_DIR="/usr/local/cuda" \
     MAKEFLAGS="-j2" \
-    TORCH_CUDA_ARCH_LIST="6.0;7.0;7.5;8.0;8.6" \
+    TORCH_CUDA_ARCH_LIST="6.0;7.0;7.5;8.0;8.6;9.0" \
     CONDA_ENV=finetuning-scheduler \
     CONDA_DEFAULT_ENV=${CONDA_ENV}
 
diff --git a/requirements/base.txt b/requirements/base.txt
index c621ec4..2b891cd 100644
--- a/requirements/base.txt
+++ b/requirements/base.txt
@@ -1,5 +1,5 @@
 #lightning>=2.3.0,<2.3.1
 # the below is uncommented when master is targeting a specific pl dev master commit
-git+https://github.com/Lightning-AI/lightning.git@a89ea11799c2801b386b6295742abcde0dc4ed08#egg=lightning
+git+https://github.com/Lightning-AI/lightning.git@0f12271d7feeacb6fbe5d70d2ce057da4a04d8b4#egg=lightning
 torch>=2.0.0
 mpmath<1.4.0  # temporary requirement to avoid installation of alpha version of mpmath
diff --git a/requirements/cli.txt b/requirements/cli.txt
index fcf5ce9..ab70000 100644
--- a/requirements/cli.txt
+++ b/requirements/cli.txt
@@ -1,3 +1,3 @@
-jsonargparse[signatures]>=4.26.1
+jsonargparse[signatures]>=4.27.7
 omegaconf>=2.1.0
 hydra-core>=1.1.0
diff --git a/requirements/pl_adjust_versions.py b/requirements/pl_adjust_versions.py
index feadffb..7e0aa7c 100644
--- a/requirements/pl_adjust_versions.py
+++ b/requirements/pl_adjust_versions.py
@@ -5,8 +5,10 @@
 
 # IMPORTANT: this list needs to be sorted in reverse
 VERSIONS = [
-    dict(torch="2.3.0", torchvision="0.18.0"),  # nightly
-    dict(torch="2.2.1", torchvision="0.17.1"),  # stable
+    dict(torch="2.4.0", torchvision="0.19.0"),  # nightly
+    dict(torch="2.3.0", torchvision="0.18.0"),  # stable
+    dict(torch="2.2.2", torchvision="0.17.2"),
+    dict(torch="2.2.1", torchvision="0.17.1"),
     dict(torch="2.2.0", torchvision="0.17.0"),
     dict(torch="2.1.2", torchvision="0.16.2"),
     dict(torch="2.1.1", torchvision="0.16.1"),
diff --git a/requirements/standalone_base.txt b/requirements/standalone_base.txt
index 9831d34..33cbd33 100644
--- a/requirements/standalone_base.txt
+++ b/requirements/standalone_base.txt
@@ -1,5 +1,5 @@
 #pytorch-lightning>=2.3.0,<2.3.1
 # the below is uncommented when master is targeting a specific pl dev master commit
-git+https://github.com/Lightning-AI/pytorch-lightning.git@a89ea11799c2801b386b6295742abcde0dc4ed08#egg=pytorch-lightning
+git+https://github.com/Lightning-AI/pytorch-lightning.git@0f12271d7feeacb6fbe5d70d2ce057da4a04d8b4#egg=pytorch-lightning
 torch>=2.0.0
 mpmath<1.4.0  # temporary requirement to avoid installation of alpha version of mpmath
diff --git a/setup.py b/setup.py
index f75d7eb..52e96d3 100755
--- a/setup.py
+++ b/setup.py
@@ -138,7 +138,7 @@ def _setup_args(standalone: bool = False) -> Dict[str, Any]:
         _INSTALL_PATHS["require"],
         file_name=base_reqs,
         standalone=standalone,
-        pl_commit="a89ea11799c2801b386b6295742abcde0dc4ed08",
+        pl_commit="0f12271d7feeacb6fbe5d70d2ce057da4a04d8b4",
     )
     base_setup["install_requires"] = install_requires
     return base_setup
diff --git a/src/finetuning_scheduler/fts_supporters.py b/src/finetuning_scheduler/fts_supporters.py
index 76e94c2..93129b2 100644
--- a/src/finetuning_scheduler/fts_supporters.py
+++ b/src/finetuning_scheduler/fts_supporters.py
@@ -979,7 +979,7 @@ def reinit_optimizer(self, new_optimizer: Dict, trainer: pl.Trainer, init_params
             group["initial_lr"] = group.get("initial_lr", group["lr"])
         trainer.strategy.optimizers = [new_optimizer_handle]  # type: ignore[list-item]
         if trainer.lr_scheduler_configs:
-            trainer.lr_scheduler_configs[0].scheduler.optimizer = new_optimizer_handle
+            trainer.lr_scheduler_configs[0].scheduler.optimizer = new_optimizer_handle  # type: ignore[assignment]
         self._maybe_trace_reinit("optimizer", prev_optim_repr, repr(trainer.strategy.optimizers[0]))
         return new_optimizer_handle  # type:ignore[return-value]
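The pattern above — seeding "initial_lr" in each new parameter group and re-pointing the live scheduler at the replacement optimizer — is what keeps LR scheduling consistent across an optimizer reinitialization. A minimal standalone sketch of the same idea in plain PyTorch (illustrative only, not finetuning-scheduler's full reinit logic):

    import torch

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)

    # reinitialize the optimizer mid-training
    new_optimizer = torch.optim.AdamW(model.parameters(), lr=0.01)
    for group in new_optimizer.param_groups:
        group["initial_lr"] = group.get("initial_lr", group["lr"])
    # without re-pointing, the scheduler would keep stepping the stale optimizer
    scheduler.optimizer = new_optimizer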
 
diff --git a/src/finetuning_scheduler/types.py b/src/finetuning_scheduler/types.py
index ac135f1..ac9ab0c 100644
--- a/src/finetuning_scheduler/types.py
+++ b/src/finetuning_scheduler/types.py
@@ -20,7 +20,8 @@
 from typing_extensions import TypeAlias
 
 import torch
-from lightning.fabric.utilities.types import _TORCH_LRSCHEDULER, Optimizable, ReduceLROnPlateau
+from torch.optim.lr_scheduler import LRScheduler, ReduceLROnPlateau
+from lightning.fabric.utilities.types import Optimizable
 from lightning.pytorch.callbacks import EarlyStopping, ModelCheckpoint
 
 
@@ -46,6 +47,6 @@ def add_param_group(self, param_group: Dict[Any, Any]) -> None:
     "LinearLR",
 ]
 FTSLRSchedulerTypeTuple = tuple(getattr(torch.optim.lr_scheduler, lr_class) for lr_class in supported_lrs)
-FTSLRSchedulerType = Union[Type[_TORCH_LRSCHEDULER], Type[ReduceLROnPlateau]]
+FTSLRSchedulerType = Union[Type[LRScheduler], Type[ReduceLROnPlateau]]
 
 BaseCallbackDepType: TypeAlias = Union[Type[EarlyStopping], Type[ModelCheckpoint]]
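The change above swaps Lightning's private _TORCH_LRSCHEDULER alias for the public torch.optim.lr_scheduler.LRScheduler class. ReduceLROnPlateau remains a separate Union member because it does not subclass LRScheduler in the older torch versions this package supports. A hedged sketch of how such an alias can back a runtime check (illustrative, not the package's actual validation):

    from torch.optim.lr_scheduler import LRScheduler, ReduceLROnPlateau, StepLR

    def is_supported_scheduler_cls(sched_cls: type) -> bool:
        # mirror the Union: torch's LRScheduler hierarchy plus ReduceLROnPlateau
        return issubclass(sched_cls, (LRScheduler, ReduceLROnPlateau))

    assert is_supported_scheduler_cls(StepLR)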
diff --git a/tests/helpers/boring_model.py b/tests/helpers/boring_model.py
index 1a30b0e..882e4f4 100644
--- a/tests/helpers/boring_model.py
+++ b/tests/helpers/boring_model.py
@@ -16,13 +16,12 @@
 from warnings import WarningMessage
 
 import torch
-from lightning.fabric.utilities.types import _TORCH_LRSCHEDULER
 from lightning.pytorch import LightningDataModule, LightningModule
 from lightning.pytorch.core.optimizer import LightningOptimizer
 from lightning.pytorch.utilities.types import STEP_OUTPUT
 from torch import Tensor
 from torch.optim import Optimizer
-from torch.optim.lr_scheduler import LambdaLR
+from torch.optim.lr_scheduler import LambdaLR, LRScheduler
 from torch.utils.data import DataLoader, Dataset, IterableDataset, Subset
 
 
@@ -183,7 +182,7 @@ def test_step(self, batch: Tensor, batch_idx: int) -> Optional[STEP_OUTPUT]:
     # def test_epoch_end(self, outputs) -> None:
     #     torch.stack([x["y"] for x in outputs]).mean()
 
-    def configure_optimizers(self) -> Tuple[List[torch.optim.Optimizer], List[_TORCH_LRSCHEDULER]]:
+    def configure_optimizers(self) -> Tuple[List[torch.optim.Optimizer], List[LRScheduler]]:
         optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
         lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
         return [optimizer], [lr_scheduler]
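As in types.py, the annotation now relies on torch's public LRScheduler class. A minimal sketch of the (optimizers, schedulers) tuple contract the annotation describes, exercised outside Lightning (hypothetical standalone usage):

    import torch

    layer = torch.nn.Linear(32, 2)
    optimizer = torch.optim.SGD(layer.parameters(), lr=0.1)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
    optimizers, schedulers = [optimizer], [lr_scheduler]

    optimizers[0].step()   # one optimization step...
    schedulers[0].step()   # ...then advance the LR schedule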