Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

test by default on linux, osx, win #793

Merged
merged 4 commits into from
Jul 19, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
name: Garak pytest
name: Garak pytest - Linux

on:
push:
Expand Down
7 changes: 6 additions & 1 deletion .github/workflows/test_macos.yml
Original file line number Diff line number Diff line change
@@ -1,6 +1,11 @@
name: Garak pytest - MacOS

on: [workflow_dispatch]
on:
push:
branches:
- 'main'
pull_request:
workflow_dispatch:

jobs:
build_macos:
Expand Down
7 changes: 6 additions & 1 deletion .github/workflows/test_windows.yml
Original file line number Diff line number Diff line change
@@ -1,6 +1,11 @@
name: Garak pytest - Windows

on: [workflow_dispatch]
on:
push:
branches:
- 'main'
pull_request:
workflow_dispatch:

jobs:
build_windows:
Expand Down
4 changes: 3 additions & 1 deletion garak/probes/atkgen.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,9 @@ class Tox(Probe):
"constructive_tension": True,
"red_team_model_type": "huggingface.Pipeline",
"red_team_model_name": "leondz/artgpt2tox",
"red_team_model_config": {},
"red_team_model_config": {
"hf_args": {"device": "cpu", "torch_dtype": "float32"}
}, # defer acceleration devices to model under test unless overridden
"red_team_prompt_template": "<|input|>[query]<|response|>",
"red_team_postproc_rm_regex": "\<\|.*",
"use_only_first_sent": True, # should we only consider the first sentence of the target's response?
Expand Down
22 changes: 12 additions & 10 deletions tests/generators/test_huggingface.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,21 +6,23 @@
DEFAULT_GENERATIONS_QTY = 10


def test_pipeline():
@pytest.fixture
def hf_generator_config():
gen_config = {
"huggingface": {
"Pipeline": {
"name": "gpt2",
"hf_args": {
"device": "cpu",
},
}
"hf_args": {
"device": "cpu",
"torch_dtype": "float32",
},
}
}
config_root = GarakSubConfig()
setattr(config_root, "generators", gen_config)
return config_root

g = garak.generators.huggingface.Pipeline("gpt2", config_root=config_root)

def test_pipeline(hf_generator_config):
g = garak.generators.huggingface.Pipeline("gpt2", config_root=hf_generator_config)
assert g.name == "gpt2"
assert g.generations == DEFAULT_GENERATIONS_QTY
assert isinstance(g.generator, transformers.pipelines.text_generation.Pipeline)
Expand Down Expand Up @@ -53,8 +55,8 @@ def test_inference():
assert isinstance(item, str)


def test_model():
g = garak.generators.huggingface.Model("gpt2")
def test_model(hf_generator_config):
g = garak.generators.huggingface.Model("gpt2", config_root=hf_generator_config)
assert g.name == "gpt2"
assert g.generations == DEFAULT_GENERATIONS_QTY
assert isinstance(g, garak.generators.huggingface.Model)
Expand Down
1 change: 1 addition & 0 deletions tests/plugins/test_plugin_cache.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ def temp_cache_location(request) -> None:
with tempfile.NamedTemporaryFile(buffering=0, delete=False) as tmp:
PluginCache._user_plugin_cache_file = tmp.name
PluginCache._plugin_cache_file = tmp.name
tmp.close()
os.remove(tmp.name)
# reset the class level singleton
PluginCache._plugin_cache_dict = None
Expand Down
9 changes: 8 additions & 1 deletion tests/probes/test_probes_atkgen.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,14 @@ def test_atkgen_config():
p = garak._plugins.load_plugin("probes.atkgen.Tox")
rt_mod, rt_klass = p.red_team_model_type.split(".")
assert p.red_team_model_config == {
"generators": {rt_mod: {rt_klass: {"name": p.red_team_model_name}}}
"generators": {
rt_mod: {
rt_klass: {
"hf_args": {"device": "cpu", "torch_dtype": "float32"},
"name": p.red_team_model_name,
}
}
}
}


Expand Down
Loading