From d6054cb5d8230a39e834518560c7f6b26b9acf89 Mon Sep 17 00:00:00 2001
From: Julian Fong <44014224+julian-fong@users.noreply.github.com>
Date: Mon, 6 Jan 2025 12:04:32 -0500
Subject: [PATCH] [BUG] Fix `AdapterPlus` config (#775)

This PR fixes the configuration parameters set in the `AdapterPlusConfig`.

Edit: this PR also incorporates some updates described in the comments on #764:

1) Added more information regarding training configurations to the
   `AdapterPlusConfig` and its corresponding notebook
2) Added more information regarding layer norms to the documentation
---
 docs/methods.md                              |  5 +++++
 notebooks/ViT_AdapterPlus_FineTuning.ipynb   | 15 +++++++++++++--
 src/adapters/configuration/adapter_config.py | 11 ++++++++++-
 3 files changed, 28 insertions(+), 3 deletions(-)

diff --git a/docs/methods.md b/docs/methods.md
index 302b1973d..95226e357 100644
--- a/docs/methods.md
+++ b/docs/methods.md
@@ -59,6 +59,11 @@ _Papers:_
 * [Adapters Strike Back](https://arxiv.org/pdf/2406.06820) (Steitz and Roth, 2024)
 * [AdapterHub: A Framework for Adapting Transformers](https://arxiv.org/pdf/2007.07779.pdf) (Pfeiffer et al., 2020)
 
+```{eval-rst}
+.. note::
+    The two parameters ``original_ln_before`` and ``original_ln_after`` of bottleneck adapters control both the addition of the residual input and the application of the pretrained layer norm. If the original model does not apply a layer norm at a given position of the forward pass (e.g. after the FFN layer), the bottleneck adapter parameters at that position only control the addition of the residual input.
+```
+
 ## Language Adapters - Invertible Adapters
 
 _Configuration class_: [`SeqBnInvConfig`](adapters.SeqBnInvConfig), [`DoubleSeqBnInvConfig`](adapters.DoubleSeqBnInvConfig)
diff --git a/notebooks/ViT_AdapterPlus_FineTuning.ipynb b/notebooks/ViT_AdapterPlus_FineTuning.ipynb
index 1cf549ea7..6833a6b0e 100644
--- a/notebooks/ViT_AdapterPlus_FineTuning.ipynb
+++ b/notebooks/ViT_AdapterPlus_FineTuning.ipynb
@@ -205,7 +205,18 @@
    "source": [
     "### Loading the `ViT` model and the `AdapterPlusConfig`\n",
     "\n",
-    "Here we load the `vit-base-patch16-224-in21k` model similar to the one used in the `AdapterConfig` paper. We will load the model using the `adapters` `AutoAdapterModel` and add the corresponding `AdapterPlusConfig`. To read more about the config, you can check out the docs page [here](https://docs.adapterhub.ml/methods#bottleneck-adapters) under `AdapterPlusConfig`"
+    "Here we load the `vit-base-patch16-224-in21k` model, similar to the one used in the AdapterPlus paper. We will load the model using the `adapters` `AutoAdapterModel` and add the corresponding `AdapterPlusConfig`. To read more about the config, check out the docs page [here](https://docs.adapterhub.ml/methods#bottleneck-adapters) under `AdapterPlusConfig`.\n",
+    "\n",
+    "#### Important Note\n",
+    "\n",
+    "Please note that some configurations of the adapter parameters `original_ln_after`, `original_ln_before`, and\n",
+    "`residual_before_ln` may result in performance issues during training.\n",
+    "\n",
+    "In the general case:\n",
+    "\n",
+    "1) At least one of `original_ln_before` or `original_ln_after` should be set to `True` to ensure that the original\n",
+    "   residual connection from pre-training is preserved.\n",
+    "2) If `original_ln_after` is set to `False`, `residual_before_ln` must also be set to `False` to ensure convergence during training."
    ]
   },
   {
@@ -218,7 +229,7 @@
    "from adapters import AdapterPlusConfig\n",
    "\n",
    "model = ViTAdapterModel.from_pretrained(model_name_or_path)\n",
-   "config = AdapterPlusConfig(original_ln_after=True)\n",
+   "config = AdapterPlusConfig()\n",
    "\n",
    "model.add_adapter(\"adapterplus_config\", config)\n",
    "model.add_image_classification_head(\"adapterplus_config\", num_labels=num_classes)\n",
diff --git a/src/adapters/configuration/adapter_config.py b/src/adapters/configuration/adapter_config.py
index b5249cb9f..9e1cf052a 100644
--- a/src/adapters/configuration/adapter_config.py
+++ b/src/adapters/configuration/adapter_config.py
@@ -374,10 +374,19 @@ class ParBnConfig(BnConfig):
 class AdapterPlusConfig(BnConfig):
     """
     The AdapterPlus config architecture proposed by Jan-Martin O. Steitz and Stefan Roth. See https://arxiv.org/pdf/2406.06820
+
+    Please note that some configurations of the adapter parameters `original_ln_after`, `original_ln_before`, and
+    `residual_before_ln` may result in performance issues during training.
+
+    In the general case:
+    1) At least one of `original_ln_before` or `original_ln_after` should be set to `True` to ensure that the original
+       residual connection from pre-training is preserved.
+    2) If `original_ln_after` is set to `False`, `residual_before_ln` must also be set to `False` to ensure convergence
+       during training.
     """
 
     original_ln_after: bool = False
-    residual_before_ln: bool = True
+    original_ln_before: bool = True
+    residual_before_ln: bool = False
     stochastic_depth: float = 0.1
     init_weights: str = "houlsby"
     scaling: Union[float, str] = "channel"
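
For context, the sketch below shows how the corrected defaults would be used once this patch is applied; it mirrors the notebook cell changed above. The checkpoint ID and `num_labels` value are stand-ins for this sketch (the notebook reads them from `model_name_or_path` and `num_classes`).

```python
from adapters import AdapterPlusConfig, ViTAdapterModel

# Stand-in checkpoint; the notebook uses the model_name_or_path variable.
model = ViTAdapterModel.from_pretrained("google/vit-base-patch16-224-in21k")

# With the fixed defaults, no overrides are needed:
# original_ln_before=True, original_ln_after=False, residual_before_ln=False.
config = AdapterPlusConfig()

# Both rules from the docstring hold for the defaults:
assert config.original_ln_before or config.original_ln_after      # rule 1
assert config.original_ln_after or not config.residual_before_ln  # rule 2

model.add_adapter("adapterplus_config", config)
model.add_image_classification_head("adapterplus_config", num_labels=10)
model.train_adapter("adapterplus_config")  # freeze the backbone, train adapter + head
```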
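
The docs note about `original_ln_before`/`original_ln_after` can also be made concrete with a loose sketch. This illustrates only the semantics described in the note, not the library's actual control flow; `bottleneck_step` and its arguments are hypothetical names.

```python
def bottleneck_step(hidden_states, residual, adapter, layer_norm,
                    original_ln_before, original_ln_after):
    # Illustration only: each flag gates the residual addition and, when the
    # model has a pretrained layer norm at this position, its application.
    if original_ln_before:
        hidden_states = hidden_states + residual
        if layer_norm is not None:
            hidden_states = layer_norm(hidden_states)
    hidden_states = adapter(hidden_states)
    if original_ln_after:
        hidden_states = hidden_states + residual
        if layer_norm is not None:
            # If the model has no layer norm here (layer_norm is None),
            # the flag effectively controls only the residual addition above.
            hidden_states = layer_norm(hidden_states)
    return hidden_states
```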