Commit 3b16e08: Cleanup
mgoin committed May 10, 2024
1 parent b6c1005
Showing 7 changed files with 37 additions and 27 deletions.

.github/workflows/test.yaml (2 changes: 1 addition & 1 deletion)

@@ -11,7 +11,7 @@ on:
       - main
 
 jobs:
-  ruff:
+  test:
     runs-on: ubuntu-latest
     strategy:
       matrix:

auto_fp8/quantize.py (9 changes: 7 additions & 2 deletions)

@@ -179,7 +179,9 @@ def quantize_activations(model, calibration_tokens):
         if not isinstance(dynamic_quant_linear, FP8DynamicLinear):
             continue
         quantizer = FP8StaticLinearQuantizer(
-            dynamic_quant_linear.weight, dynamic_quant_linear.weight_scale, dynamic_quant_linear.bias
+            dynamic_quant_linear.weight,
+            dynamic_quant_linear.weight_scale,
+            dynamic_quant_linear.bias,
         )
         replace_module(model, name, quantizer)
         del dynamic_quant_linear
@@ -197,7 +199,10 @@ def quantize_activations(model, calibration_tokens):
         if not isinstance(quantizer, FP8StaticLinearQuantizer):
             continue
         static_proj = FP8StaticLinear(
-            quantizer.weight, quantizer.weight_scale, quantizer.bias, quantizer.act_scale
+            quantizer.weight,
+            quantizer.weight_scale,
+            quantizer.bias,
+            quantizer.act_scale,
         )
         replace_module(model, name, static_proj)
         del quantizer

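For context on the arguments being reflowed above: FP8StaticLinear carries an FP8 weight, a per-tensor weight_scale, the bias, and a calibrated act_scale. A minimal sketch of how such a per-tensor FP8 scale can be computed with stock PyTorch follows; it is an illustration only, not AutoFP8's exact implementation.

import torch

def per_tensor_fp8_quantize(weight: torch.Tensor):
    # Per-tensor scale: map the largest magnitude onto the FP8 E4M3 maximum (448.0).
    fp8_max = torch.finfo(torch.float8_e4m3fn).max
    scale = weight.abs().max().clamp(min=1e-12).float() / fp8_max
    # Scale down, clamp into range, and cast to FP8; dequantize with qweight.float() * scale.
    qweight = (weight / scale).clamp(-fp8_max, fp8_max).to(torch.float8_e4m3fn)
    return qweight, scale
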
example.py (13 changes: 5 additions & 8 deletions)

@@ -5,16 +5,13 @@
 quantized_model_dir = "opt-125m-fp8"
 
 tokenizer = AutoTokenizer.from_pretrained(pretrained_model_dir, use_fast=True)
-examples = [
-    "auto-fp8 is an easy-to-use model quantization library"
-]
+examples = ["auto-fp8 is an easy-to-use model quantization library"]
 examples = tokenizer(examples, return_tensors="pt").to("cuda")
 
-quantize_config = BaseQuantizeConfig(
-    quant_method="fp8",
-    activation_scheme="static"
-)
+quantize_config = BaseQuantizeConfig(quant_method="fp8", activation_scheme="static")
 
-model = AutoFP8ForCausalLM.from_pretrained(pretrained_model_dir, quantize_config=quantize_config)
+model = AutoFP8ForCausalLM.from_pretrained(
+    pretrained_model_dir, quantize_config=quantize_config
+)
 model.quantize(examples)
 model.save_quantized(quantized_model_dir)

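With activation_scheme="static", model.quantize(examples) runs the tokenized calibration batch through the model so activation scales can be recorded before save_quantized writes the checkpoint. Below is a sketch of preparing a slightly larger calibration batch in the same style; the sample texts are made up, and padding/truncation are standard Hugging Face tokenizer options rather than anything this commit introduces.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m", use_fast=True)
calibration_texts = [
    "auto-fp8 is an easy-to-use model quantization library",
    "fp8 stores weights and activations in eight bits",
    "calibration batches should resemble real inference inputs",
]
# Batch the texts together; move to GPU as example.py does.
examples = tokenizer(
    calibration_texts, return_tensors="pt", padding=True, truncation=True
).to("cuda")
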
examples/original_quantize.py (2 changes: 1 addition & 1 deletion)

@@ -163,7 +163,7 @@ def forward(self, x):
 def replace_module(model, name, new_module):
     if "." in name:
         parent_name = name.rsplit(".", 1)[0]
-        child_name = name[len(parent_name) + 1:]
+        child_name = name[len(parent_name) + 1 :]
         parent = model.model.get_submodule(parent_name)
     else:
         parent_name = ""

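Only the name-splitting half of replace_module is visible in this hunk. The usual completion of the pattern resolves the parent module and swaps the child attribute in place; the tail below is a sketch of that completion, not lines shown in the diff.

def replace_module(model, name, new_module):
    # Split a dotted path such as "decoder.layers.0.fc1" into parent path and child name.
    if "." in name:
        parent_name = name.rsplit(".", 1)[0]
        child_name = name[len(parent_name) + 1 :]
        parent = model.model.get_submodule(parent_name)
    else:
        # Top-level module: the parent is the model itself.
        parent_name = ""
        child_name = name
        parent = model.model
    # Swap the child module in place.
    setattr(parent, child_name, new_module)
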
quantize.py (11 changes: 8 additions & 3 deletions)

@@ -165,7 +165,7 @@ def forward(self, x):
 def replace_module(model, name, new_module):
     if "." in name:
         parent_name = name.rsplit(".", 1)[0]
-        child_name = name[len(parent_name) + 1:]
+        child_name = name[len(parent_name) + 1 :]
         parent = model.model.get_submodule(parent_name)
     else:
         parent_name = ""
@@ -193,7 +193,9 @@ def quantize_activations(model, calibration_tokens):
         if not isinstance(dynamic_quant_linear, FP8DynamicLinear):
             continue
         quantizer = FP8StaticLinearQuantizer(
-            dynamic_quant_linear.weight, dynamic_quant_linear.weight_scale, dynamic_quant_linear.bias
+            dynamic_quant_linear.weight,
+            dynamic_quant_linear.weight_scale,
+            dynamic_quant_linear.bias,
         )
         replace_module(model, name, quantizer)
         del dynamic_quant_linear
@@ -212,7 +214,10 @@ def quantize_activations(model, calibration_tokens):
         if not isinstance(quantizer, FP8StaticLinearQuantizer):
             continue
         static_proj = FP8StaticLinear(
-            quantizer.weight, quantizer.weight_scale, quantizer.bias, quantizer.act_scale
+            quantizer.weight,
+            quantizer.weight_scale,
+            quantizer.bias,
+            quantizer.act_scale,
         )
         replace_module(model, name, static_proj)
         del quantizer

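The two loops reformatted above run in sequence: calibration first swaps each FP8DynamicLinear for an FP8StaticLinearQuantizer that observes activations, then freezes the observed scale into an FP8StaticLinear. A minimal sketch of what such an observer could look like; the class name and internals below are assumptions for illustration, not the module defined in quantize.py.

import torch
import torch.nn.functional as F

FP8_MAX = torch.finfo(torch.float8_e4m3fn).max  # 448.0 for E4M3

class ActScaleObserver(torch.nn.Module):
    # Sketch: records a per-tensor activation scale during calibration forward passes.
    def __init__(self, weight, weight_scale, bias):
        super().__init__()
        self.weight = weight              # FP8 weight tensor
        self.weight_scale = weight_scale  # per-tensor weight scale
        self.bias = bias
        self.act_scale = None

    def forward(self, x):
        # Track the largest activation magnitude seen so far and turn it into a scale.
        observed = x.abs().max().float() / FP8_MAX
        if self.act_scale is None:
            self.act_scale = observed
        else:
            self.act_scale = torch.maximum(self.act_scale, observed)
        # Run the layer in the input dtype so calibration outputs stay usable.
        w = self.weight.to(x.dtype) * self.weight_scale
        return F.linear(x, w, self.bias)
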
setup.py (2 changes: 1 addition & 1 deletion)

@@ -8,7 +8,7 @@
     description="FP8 quantization for Transformers.",
     long_description=open("README.md").read(),
     long_description_content_type="text/markdown",
-    url="https://github.com/neuralmagic/auto_fp8",
+    url="https://github.com/neuralmagic/AutoFP8",
     packages=find_packages(),
     install_requires=[
         "torch>=2.2",

tests/test_auto_fp8.py (25 changes: 14 additions & 11 deletions)

@@ -1,28 +1,31 @@
 import os
 from transformers import AutoTokenizer
 from auto_fp8 import AutoFP8ForCausalLM, BaseQuantizeConfig
+import shutil
 
 
 def test_quantization():
     model_id = "facebook/opt-125m"
     quantized_model_dir = "opt-125m-fp8"
 
     tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
-    examples = [
-        "auto-fp8 is an easy-to-use model quantization library"
-    ]
-    examples = tokenizer(examples, return_tensors="pt").to("cuda")
+    examples = ["auto-fp8 is an easy-to-use model quantization library"]
+    examples = tokenizer(examples, return_tensors="pt")
 
-    quantize_config = BaseQuantizeConfig(
-        quant_method="fp8", activation_scheme="static"
-    )
+    quantize_config = BaseQuantizeConfig(quant_method="fp8", activation_scheme="static")
 
-    model = AutoFP8ForCausalLM.from_pretrained(model_id, quantize_config=quantize_config, device_map="auto")
+    model = AutoFP8ForCausalLM.from_pretrained(
+        model_id, quantize_config=quantize_config
+    )
+    model.model.to("cpu")
 
     model.quantize(examples)
     model.save_quantized(quantized_model_dir)
 
-    # We expect the model to be < 160MB
+    # Measure checkpoint size and cleanup
     model_size = os.path.getsize(f"{quantized_model_dir}/model.safetensors")
-    target_size = 160 * (1024*1024)
-    assert model_size < target_size
+    shutil.rmtree(quantized_model_dir)
+
+    # We expect the model to be < 160MB
+    target_size = 160 * (1024 * 1024)
+    assert model_size < target_size

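For reference on the bound asserted above: facebook/opt-125m has roughly 125 million parameters, and FP8 stores about one byte per weight plus small per-tensor scales, so the safetensors checkpoint should land near 125 MB, comfortably under the 160 MiB target. A back-of-the-envelope check (the parameter count is an approximation):

approx_bytes = 125_000_000            # ~125M params at 1 byte each in FP8
target_size = 160 * 1024 * 1024       # 167,772,160 bytes
assert approx_bytes < target_size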