diff --git a/src/compressed_tensors/compressors/sparse_compressors/base.py b/src/compressed_tensors/compressors/sparse_compressors/base.py
index 735c6c2c..7cd6e8e8 100644
--- a/src/compressed_tensors/compressors/sparse_compressors/base.py
+++ b/src/compressed_tensors/compressors/sparse_compressors/base.py
@@ -78,8 +78,7 @@ def compress(
             f"Compressing model with {len(model_state)} parameterized layers..."
         )
         for name, value in tqdm(model_state.items(), desc="Compressing model"):
-            ignored = not self.should_compress(name, compression_targets)
-            if ignored:
+            if not self.should_compress(name, compression_targets):
                 compressed_dict[name] = value
                 continue
             prefix = name
diff --git a/tests/testing_utils.py b/tests/testing_utils.py
index 9137fdf8..ebe7a0c6 100644
--- a/tests/testing_utils.py
+++ b/tests/testing_utils.py
@@ -130,7 +130,7 @@ def induce_sparsity(tensor, sparsity_ratio) -> "torch.Tensor":
 
 def is_gpu_available():
     """
-    Check for GPU and warn if not found
+    :return: True if a GPU is available, False otherwise
     """
     try:
         import torch  # noqa: F401