Skip to content

Commit

Permalink
Fix import structure
Browse files Browse the repository at this point in the history
  • Loading branch information
TimoImhof committed Jan 10, 2025
1 parent a122a75 commit d67692d
Show file tree
Hide file tree
Showing 5 changed files with 25 additions and 0 deletions.
2 changes: 2 additions & 0 deletions src/adapters/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,7 @@
"models.llama": ["LlamaAdapterModel"],
"models.mbart": ["MBartAdapterModel"],
"models.mistral": ["MistralAdapterModel"],
"models.mllama": ["MllamaAdapterModel"],
"models.mt5": ["MT5AdapterModel"],
"models.plbart": ["PLBartAdapterModel"],
"models.roberta": ["RobertaAdapterModel"],
Expand Down Expand Up @@ -222,6 +223,7 @@
from .models.llama import LlamaAdapterModel
from .models.mbart import MBartAdapterModel
from .models.mistral import MistralAdapterModel
from .models.mllama import MllamaAdapterModel
from .models.mt5 import MT5AdapterModel
from .models.plbart import PLBartAdapterModel
from .models.roberta import RobertaAdapterModel
Expand Down
21 changes: 21 additions & 0 deletions src/adapters/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,17 @@
)
from .xmod.mixin_xmod import XmodModelAdaptersMixin

from .mllama.mixin_mllama import (
MllamaCrossAttentionDecoderLayerAdaptersMixin,
MllamaSelfAttentionDecoderLayerAdaptersMixin,
MllamaTextCrossAttentionAdaptersMixin,
MllamaTextModelAdaptersMixin,
MllamaTextSelfAttentionAdaptersMixin,
MllamaVisionAttentionAdaptersMixin,
MllamaVisionEncoderAdaptersMixin,
MllamaVisionEncoderLayerAdaptersMixin,
MllamaVisionModelAdaptersMixin,
)

# IMPORTANT: Only add classes to this mapping that are not copied into the adapters package
MODEL_MIXIN_MAPPING = {
Expand Down Expand Up @@ -109,4 +120,14 @@
"WhisperForAudioClassification": WhisperForAudioClassificationWithHeadsMixin,
"LlamaForQuestionAnswering": LlamaForQuestionAnsweringAdapterMixin,
"MistralModel": MistralModelAdapterMixin,
    # Multimodal Llama
"MllamaVisionModel": MllamaVisionModelAdaptersMixin,
"MllamaTextModel": MllamaTextModelAdaptersMixin,
"MllamaVisionEncoder": MllamaVisionEncoderAdaptersMixin,
"MllamaVisionAttention": MllamaVisionAttentionAdaptersMixin,
"MllamaTextSelfAttention": MllamaTextSelfAttentionAdaptersMixin,
"MllamaTextCrossAttention": MllamaTextCrossAttentionAdaptersMixin,
"MllamaVisionEncoderLayer": MllamaVisionEncoderLayerAdaptersMixin,
"MllamaSelfAttentionDecoderLayer": MllamaSelfAttentionDecoderLayerAdaptersMixin,
"MllamaCrossAttentionDecoderLayer": MllamaCrossAttentionDecoderLayerAdaptersMixin,
}
1 change: 1 addition & 0 deletions src/adapters/models/auto/adapter_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
("llama", "LlamaAdapterModel"),
("mbart", "MBartAdapterModel"),
("mistral", "MistralAdapterModel"),
    ("mllama", "MllamaAdapterModel"),
("mt5", "MT5AdapterModel"),
("plbart", "PLBartAdapterModel"),
("roberta", "RobertaAdapterModel"),
Expand Down
File renamed without changes.
1 change: 1 addition & 0 deletions src/adapters/wrappers/configuration.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,7 @@
"attention_probs_dropout_prob": "attention_dropout",
},
"xlm_roberta": {},
# TODO: add mllama
}
SUBMODEL_NAMES = {"clip": ["vision_config", "text_config"], "encoder-decoder": ["encoder", "decoder"]}

Expand Down

0 comments on commit d67692d

Please sign in to comment.