
Commit

[Bug Fix]: Fix test cases and update version to 0.1.93 (#1303)
deshraj authored Mar 5, 2024
1 parent 11f4ce8 commit 4428768
Showing 6 changed files with 6 additions and 3 deletions.
embedchain/config/llm/base.py (2 changes: 0 additions & 2 deletions)
@@ -97,7 +97,6 @@ def __init__(
         endpoint: Optional[str] = None,
         model_kwargs: Optional[dict[str, Any]] = None,
         local: Optional[bool] = False,
-        base_url: Optional[str] = None,
     ):
         """
         Initializes a configuration class instance for the LLM.
@@ -172,7 +171,6 @@ def __init__(
         self.endpoint = endpoint
         self.model_kwargs = model_kwargs
         self.local = local
-        self.base_url = base_url

         if isinstance(prompt, str):
             prompt = Template(prompt)
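
For context, the test changes below suggest that, with base_url removed from BaseLlmConfig, the OpenAI path now reads the base URL from the OPENAI_API_BASE environment variable and forwards it to the chat client. The following is a minimal sketch of that pattern, assuming langchain_openai's ChatOpenAI; it mirrors the keyword arguments asserted in tests/llm/test_openai.py and is not the repository's exact code.

# Minimal sketch, assuming langchain_openai's ChatOpenAI; not the repo's exact code.
import os
from typing import Any

from langchain_openai import ChatOpenAI


def build_chat_model(config: Any) -> ChatOpenAI:
    # Mirrors the kwargs asserted in tests/llm/test_openai.py: the base URL now
    # comes from the environment rather than from BaseLlmConfig.
    return ChatOpenAI(
        model=config.model,
        temperature=config.temperature,
        max_tokens=config.max_tokens,
        model_kwargs={"top_p": config.top_p},
        api_key=os.environ["OPENAI_API_KEY"],
        base_url=os.environ["OPENAI_API_BASE"],
    )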
pyproject.toml (2 changes: 1 addition & 1 deletion)
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "embedchain"
-version = "0.1.92"
+version = "0.1.93"
 description = "Simplest open source retrieval(RAG) framework"
 authors = [
     "Taranjeet Singh <taranjeet@embedchain.ai>",
tests/evaluation/test_answer_relevancy_metric.py (1 change: 1 addition & 0 deletions)
@@ -30,6 +30,7 @@ def mock_data():
 @pytest.fixture
 def mock_answer_relevance_metric(monkeypatch):
     monkeypatch.setenv("OPENAI_API_KEY", "test_api_key")
+    monkeypatch.setenv("OPENAI_API_BASE", "test_api_base")
     metric = AnswerRelevance()
     return metric

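
For readers unfamiliar with the fixture above: pytest's monkeypatch.setenv sets the variable only for the duration of the test and restores the environment on teardown, so the fake OPENAI_API_BASE value never leaks into other tests. A self-contained illustration of the pattern; the test name and value are illustrative only.

import os


def test_env_var_is_patched(monkeypatch):
    # monkeypatch.setenv applies the change for this test only and undoes it afterward.
    monkeypatch.setenv("OPENAI_API_BASE", "test_api_base")
    assert os.environ["OPENAI_API_BASE"] == "test_api_base"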
tests/llm/test_openai.py (2 changes: 2 additions & 0 deletions)
@@ -73,6 +73,7 @@ def test_get_llm_model_answer_without_system_prompt(config, mocker):
         max_tokens=config.max_tokens,
         model_kwargs={"top_p": config.top_p},
         api_key=os.environ["OPENAI_API_KEY"],
+        base_url=os.environ["OPENAI_API_BASE"],
     )


@@ -98,6 +99,7 @@ def test_get_llm_model_answer_with_tools(config, mocker, mock_return, expected):
         max_tokens=config.max_tokens,
         model_kwargs={"top_p": config.top_p},
         api_key=os.environ["OPENAI_API_KEY"],
+        base_url=os.environ["OPENAI_API_BASE"],
     )
     mocked_convert_to_openai_tool.assert_called_once_with({"test": "test"})
     mocked_json_output_tools_parser.assert_called_once()
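
The two assertions above follow the usual pytest-mock pattern: patch the client constructor, run the code under test, then check the exact keyword arguments, now including base_url. A stripped-down sketch of that pattern; the make_client helper and test name are hypothetical, not embedchain code.

import os


def make_client(factory):
    # Hypothetical stand-in for the code under test; it forwards env-derived settings.
    return factory(
        api_key=os.environ["OPENAI_API_KEY"],
        base_url=os.environ["OPENAI_API_BASE"],
    )


def test_base_url_is_forwarded(mocker, monkeypatch):
    monkeypatch.setenv("OPENAI_API_KEY", "test_api_key")
    monkeypatch.setenv("OPENAI_API_BASE", "test_api_base")
    factory = mocker.Mock()

    make_client(factory)

    # Same style of assertion as the updated tests: the kwargs must match exactly.
    factory.assert_called_once_with(api_key="test_api_key", base_url="test_api_base")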
tests/test_app.py (1 change: 1 addition & 0 deletions)
@@ -14,6 +14,7 @@
 @pytest.fixture
 def app():
     os.environ["OPENAI_API_KEY"] = "test_api_key"
+    os.environ["OPENAI_API_BASE"] = "test_api_base"
     return App()


tests/test_factory.py (1 change: 1 addition & 0 deletions)
@@ -26,6 +26,7 @@ class TestFactories:
     def test_llm_factory_create(self, provider_name, config_data, expected_class):
         os.environ["ANTHROPIC_API_KEY"] = "test_api_key"
         os.environ["OPENAI_API_KEY"] = "test_api_key"
+        os.environ["OPENAI_API_BASE"] = "test_api_base"
         llm_instance = LlmFactory.create(provider_name, config_data)
         assert isinstance(llm_instance, expected_class)

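
The factory call visible in this test takes a provider name plus a config dict and returns an instance of the matching LLM class. A hedged usage sketch based only on that call; the import path, provider name, and config keys are assumptions, not documented API.

import os

from embedchain.factory import LlmFactory  # import path assumed

os.environ["OPENAI_API_KEY"] = "test_api_key"
os.environ["OPENAI_API_BASE"] = "test_api_base"

# Provider name and config keys are illustrative guesses.
llm = LlmFactory.create("openai", {"temperature": 0.5})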
