From f9e4d7c79fea9a6126c470a9d74f97d185a55588 Mon Sep 17 00:00:00 2001
From: Dipika Sikka
Date: Tue, 14 Jan 2025 18:54:50 -0500
Subject: [PATCH] Sparse 2:4 + FP8 Quantization e2e vLLM tests (#1073)

SUMMARY:
- Add 2:4 Sparsity + FP8 Quantization e2e tests

TEST PLAN:
- Models produced by the tests:
  nm-testing/TinyLlama-1.1B-Chat-v1.0-sparse2of4_fp8_dynamic-e2e
  nm-testing/TinyLlama-1.1B-Chat-v1.0-sparse2of4_only-e2e
- Verified to run e2e with vLLM

Signed-off-by: Kyle Sayers
---
 .../vLLM/configs/sparse2of4_fp8_dynamic.yaml  |  7 ++++++
 tests/e2e/vLLM/configs/sparse_24.yaml         |  8 ++++++
 .../Sparse_2of4/recipe_sparse_2of4.yaml       |  6 +++++
 .../recipe_sparse_2of4_fp8_dynamic.yaml       | 25 +++++++++++++++++++
 tests/e2e/vLLM/test_vllm.py                   |  6 ++++-
 5 files changed, 51 insertions(+), 1 deletion(-)
 create mode 100644 tests/e2e/vLLM/configs/sparse2of4_fp8_dynamic.yaml
 create mode 100644 tests/e2e/vLLM/configs/sparse_24.yaml
 create mode 100644 tests/e2e/vLLM/recipes/Sparse_2of4/recipe_sparse_2of4.yaml
 create mode 100644 tests/e2e/vLLM/recipes/Sparse_2of4/recipe_sparse_2of4_fp8_dynamic.yaml

diff --git a/tests/e2e/vLLM/configs/sparse2of4_fp8_dynamic.yaml b/tests/e2e/vLLM/configs/sparse2of4_fp8_dynamic.yaml
new file mode 100644
index 000000000..e1785ce2c
--- /dev/null
+++ b/tests/e2e/vLLM/configs/sparse2of4_fp8_dynamic.yaml
@@ -0,0 +1,7 @@
+cadence: "nightly"
+test_type: "regression"
+model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
+recipe: tests/e2e/vLLM/recipes/Sparse_2of4/recipe_sparse_2of4_fp8_dynamic.yaml
+scheme: sparse2of4_fp8_dynamic
+dataset_id: HuggingFaceH4/ultrachat_200k
+dataset_split: train_sft
\ No newline at end of file
diff --git a/tests/e2e/vLLM/configs/sparse_24.yaml b/tests/e2e/vLLM/configs/sparse_24.yaml
new file mode 100644
index 000000000..653168b97
--- /dev/null
+++ b/tests/e2e/vLLM/configs/sparse_24.yaml
@@ -0,0 +1,8 @@
+cadence: "nightly"
+test_type: "regression"
+model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
+recipe: tests/e2e/vLLM/recipes/Sparse_2of4/recipe_sparse_2of4.yaml
+scheme: sparse2of4_only
+dataset_id: HuggingFaceH4/ultrachat_200k
+dataset_split: train_sft
+save_compressed: False
\ No newline at end of file
diff --git a/tests/e2e/vLLM/recipes/Sparse_2of4/recipe_sparse_2of4.yaml b/tests/e2e/vLLM/recipes/Sparse_2of4/recipe_sparse_2of4.yaml
new file mode 100644
index 000000000..895e02450
--- /dev/null
+++ b/tests/e2e/vLLM/recipes/Sparse_2of4/recipe_sparse_2of4.yaml
@@ -0,0 +1,6 @@
+sparsity_stage:
+  sparsity_modifiers:
+    SparseGPTModifier:
+      sparsity: 0.5
+      mask_structure: "2:4"
+      sequential_update: false
diff --git a/tests/e2e/vLLM/recipes/Sparse_2of4/recipe_sparse_2of4_fp8_dynamic.yaml b/tests/e2e/vLLM/recipes/Sparse_2of4/recipe_sparse_2of4_fp8_dynamic.yaml
new file mode 100644
index 000000000..1e6b350e1
--- /dev/null
+++ b/tests/e2e/vLLM/recipes/Sparse_2of4/recipe_sparse_2of4_fp8_dynamic.yaml
@@ -0,0 +1,25 @@
+sparsity_stage:
+  run_type: oneshot
+  sparsity_modifiers:
+    SparseGPTModifier:
+      sparsity: 0.5
+      mask_structure: "2:4"
+      sequential_update: false
+quantization_stage:
+  run_type: oneshot
+  quantization_modifiers:
+    ConstantPruningModifier:
+      targets: [
+        're:.*q_proj.weight',
+        're:.*k_proj.weight',
+        're:.*v_proj.weight',
+        're:.*o_proj.weight',
+        're:.*gate_proj.weight',
+        're:.*up_proj.weight',
+        're:.*down_proj.weight',
+      ]
+      start: 0
+    QuantizationModifier:
+      targets: ["Linear"]
+      ignore: ["lm_head"]
+      scheme: "FP8_DYNAMIC"
diff --git a/tests/e2e/vLLM/test_vllm.py b/tests/e2e/vLLM/test_vllm.py
index e554ad3ff..70a6a35e4 100644
--- a/tests/e2e/vLLM/test_vllm.py
+++ b/tests/e2e/vLLM/test_vllm.py
@@ -21,6 +21,7 @@
     vllm_installed = False
     logger.warning("vllm is not installed. This test will be skipped")
 
+
 HF_MODEL_HUB_NAME = "nm-testing"
 
 TEST_DATA_FILE = os.environ.get("TEST_DATA_FILE", "")
@@ -74,6 +75,7 @@ def set_up(self):
         self.recipe = eval_config.get("recipe")
         self.quant_type = eval_config.get("quant_type")
         self.save_dir = eval_config.get("save_dir")
+        self.save_compressed = eval_config.get("save_compressed", True)
 
         logger.info("========== RUNNING ==============")
         logger.info(self.scheme)
@@ -113,7 +115,9 @@ def test_vllm(self):
         self._check_session_contains_recipe()
 
         logger.info("================= SAVING TO DISK ======================")
-        oneshot_model.save_pretrained(self.save_dir)
+        oneshot_model.save_pretrained(
+            self.save_dir, save_compressed=self.save_compressed
+        )
         tokenizer.save_pretrained(self.save_dir)
         recipe_path = os.path.join(self.save_dir, "recipe.yaml")
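
For reference, a minimal sketch of loading one of the checkpoints produced by these
tests in vLLM, which is essentially what the e2e flow verifies. The model id is one
of the artifacts listed in the test plan above; the prompt and sampling settings are
illustrative assumptions, not part of the test code.

    from vllm import LLM, SamplingParams

    # One of the checkpoints produced by the new tests (2:4 sparse + FP8 dynamic).
    llm = LLM(model="nm-testing/TinyLlama-1.1B-Chat-v1.0-sparse2of4_fp8_dynamic-e2e")

    # Greedy decoding of a short completion; the prompt is arbitrary and only
    # exercises the sparse + FP8 inference path.
    sampling_params = SamplingParams(temperature=0.0, max_tokens=32)
    outputs = llm.generate(["The capital of France is"], sampling_params)
    print(outputs[0].outputs[0].text)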