Remove unused sparseml.export utilities (#950)
* remove sparseml utilities

Signed-off-by: Kyle Sayers <kylesayrs@gmail.com>

* use in model_load

Signed-off-by: Kyle Sayers <kylesayrs@gmail.com>

* remove use of RECIPE FILE NAME

Signed-off-by: Kyle Sayers <kylesayrs@gmail.com>

* rename to RECIPE_FILE_NAME, avoid circular import

Signed-off-by: Kyle Sayers <kylesayrs@gmail.com>

* remove qa ignore

Signed-off-by: Kyle Sayers <kylesayrs@gmail.com>

---------

Signed-off-by: Kyle Sayers <kylesayrs@gmail.com>
Co-authored-by: Dipika Sikka <dipikasikka1@gmail.com>
kylesayrs and dsikka authored Dec 5, 2024
1 parent 1494fd6 commit 1830382
Showing 4 changed files with 13 additions and 447 deletions.
4 changes: 2 additions & 2 deletions src/llmcompressor/pytorch/model_load/helpers.py
@@ -24,8 +24,6 @@
     "save_completed_stages",
 ]
 
-RECIPE_FILE_NAME = "recipe.yaml"
-
 
 def log_model_load(
     model: Module, model_name_or_path: str, model_type: str, delayed_load: bool
@@ -106,6 +104,8 @@ def save_model_and_recipe(
     :param save_safetensors: whether to save as safetensors or pickle (bin)
     :param save_compressed: whether to compress sparse weights on disk
     """
+    # avoid circular import
+    from llmcompressor.transformers.utils.helpers import RECIPE_FILE_NAME
 
     model.save_pretrained(
         save_path, save_compressed=save_compressed, safe_serialization=save_safetensors
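
The addition above moves the import of RECIPE_FILE_NAME inside save_model_and_recipe, so the name is resolved at call time rather than at module import time. A minimal sketch of that deferred-import pattern, written as two files in one listing; the file names and function body here are illustrative, not part of the commit:

# constants.py (stands in for llmcompressor/transformers/utils/helpers.py)
RECIPE_FILE_NAME = "recipe.yaml"

# helpers.py (stands in for llmcompressor/pytorch/model_load/helpers.py)
import os

def save_model_and_recipe(save_directory: str) -> str:
    # Deferred import: if constants.py (directly or transitively) imports
    # this module at its own top level, importing here at call time avoids
    # the ImportError a top-level circular import would raise at startup.
    from constants import RECIPE_FILE_NAME

    return os.path.join(save_directory, RECIPE_FILE_NAME)

The trade-off is that the dependency is less visible at the top of the module, which is why the diff flags it with an inline "avoid circular import" comment.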
3 changes: 2 additions & 1 deletion src/llmcompressor/transformers/finetune/session_mixin.py
@@ -24,8 +24,9 @@
 from llmcompressor.modifiers.distillation.utils.pytorch.model_wrapper import (
     KDModelWrapper,
 )
-from llmcompressor.pytorch.model_load.helpers import RECIPE_FILE_NAME, get_session_model
+from llmcompressor.pytorch.model_load.helpers import get_session_model
 from llmcompressor.pytorch.utils import ModuleSparsificationInfo
+from llmcompressor.transformers import RECIPE_FILE_NAME
 from llmcompressor.transformers.finetune.callbacks import (
     DisableHalfPrecisionCallback,
     TrainingLoopCallbacks,
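
Note that session_mixin.py imports RECIPE_FILE_NAME from the llmcompressor.transformers package root, while the diff below imports it from llmcompressor.transformers.utils and the first file imports it from llmcompressor.transformers.utils.helpers. A plausible sketch of the re-export chain that would make all three spellings resolve to the same constant; the actual __init__.py contents are an assumption, since they are not shown in this commit:

# llmcompressor/transformers/utils/helpers.py
RECIPE_FILE_NAME = "recipe.yaml"

# llmcompressor/transformers/utils/__init__.py
from llmcompressor.transformers.utils.helpers import RECIPE_FILE_NAME  # noqa: F401

# llmcompressor/transformers/__init__.py
from llmcompressor.transformers.utils import RECIPE_FILE_NAME  # noqa: F401

Importing from the package root lets callers stay insulated from where the constant physically lives, which matters when definitions move between modules, as they do in this commit.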
(third changed file; path not captured)
@@ -24,6 +24,7 @@
 from llmcompressor.transformers.compression.sparsity_config import (
     SparsityConfigMetadata,
 )
+from llmcompressor.transformers.utils import RECIPE_FILE_NAME
 from llmcompressor.utils.fsdp.helpers import (
     find_and_move_state_dicts_to_cpu,
     unwrap_and_export_model,
@@ -189,7 +190,7 @@ def skip(*args, **kwargs):
     )
     compressor.update_config(save_directory)
 
-    recipe_path = os.path.join(save_directory, "recipe.yaml")
+    recipe_path = os.path.join(save_directory, RECIPE_FILE_NAME)
     session = active_session()
 
     if (recipe_yaml_str := session.get_serialized_recipe()) is not None:
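
For context, a small sketch of the save step the second hunk touches: the recipe serialized by the active session is written under the shared constant, so every save path produces the same file name. The function boundary and error handling here are illustrative, not the repository's code:

import os

RECIPE_FILE_NAME = "recipe.yaml"  # value defined in llmcompressor/transformers/utils/helpers.py

def write_recipe(recipe_yaml_str: str, save_directory: str) -> str:
    # Join the save directory with the shared constant instead of a
    # hard-coded "recipe.yaml" literal, mirroring the hunk above.
    recipe_path = os.path.join(save_directory, RECIPE_FILE_NAME)
    with open(recipe_path, "w") as file:
        file.write(recipe_yaml_str)
    return recipe_path

The walrus-operator guard in the hunk (recipe_yaml_str := session.get_serialized_recipe()) means the file is only written when the session actually has a recipe to serialize.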