WIP: update spec
keegangeorge committed Dec 13, 2024
Parent: 34f5b15 · Commit: 90fa0eb
Showing 2 changed files with 32 additions and 4 deletions.
app/services/problem_check/ai_llm_status.rb (3 changes: 1 addition & 2 deletions)
@@ -12,15 +12,14 @@ def call

def llm_errors
return [] if !SiteSetting.discourse_ai_enabled

LlmModel.in_use.find_each.filter_map do |model|
try_validate(model) { validator.run_test(model) }
end
end

def try_validate(model, &blk)
begin
raise({ message: "Forced error for testing" }.to_json) if Rails.env.test?
# raise({ message: "Forced error for testing" }.to_json) if Rails.env.test?
blk.call
nil
rescue => e
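Note on the hunk above: llm_errors walks every in-use model and collects one entry per failing validation, with try_validate returning nil on success and its rescue clause turning an exception into a reportable payload (the rescue body is cut off in this hunk). A minimal standalone sketch of that collect-failures pattern, with an assumed error shape since the real mapping is not shown here:

# Sketch of the collect-failures pattern from llm_errors/try_validate.
# The error hash below is an assumed shape for illustration; the actual
# rescue body is not visible in this diff.
def collect_validation_errors(models)
  models.filter_map do |model|
    begin
      yield(model)
      nil # validation passed; filter_map drops nils
    rescue => e
      { model: model, error: e.message } # hypothetical shape
    end
  end
end

Roughly, the service invokes this shape of loop as collect_validation_errors(LlmModel.in_use.find_each) { |m| validator.run_test(m) }.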
spec/services/problem_check/ai_llm_status_spec.rb (33 changes: 31 additions & 2 deletions)
@@ -5,8 +5,25 @@
RSpec.describe ProblemCheck::AiLlmStatus do
subject(:check) { described_class.new }

# let(:spec_model) do
# LlmModel.new(
# id: 50,
# display_name: "GPT-4 Turbo",
# name: "gpt-4-turbo",
# provider: "open_ai",
# tokenizer: "DiscourseAi::Tokenizer::OpenAiTokenizer",
# max_prompt_tokens: 131_072,
# api_key: "invalid",
# url: "https://api.openai.com/v1/chat/completions",
# )
# end

fab!(:llm_model)

before do
assign_fake_provider_to(:ai_summarization_model)
pp "Spec model: #{llm_model.inspect}"
SiteSetting.ai_summarization_model = "custom:#{llm_model.id}"
# assign_fake_provider_to(:ai_summarization_model)
SiteSetting.ai_summarization_enabled = true
end

@@ -17,11 +34,23 @@
end

context "with discourse-ai plugin enabled for the site" do
let(:llm_model) { LlmModel.in_use.first }
# let(:llm_model) { LlmModel.in_use.first }

before { SiteSetting.discourse_ai_enabled = true }

it "returns a problem with an LLM model" do
stub_request(:post, "https://api.openai.com/v1/chat/completions").with(
body:
"{\"model\":\"gpt-4-turbo\",\"messages\":[{\"role\":\"system\",\"content\":\"You are a helpful bot\"},{\"role\":\"user\",\"content\":\"How much is 1 + 1?\"}]}",
headers: {
"Accept" => "*/*",
"Accept-Encoding" => "gzip;q=1.0,deflate;q=0.6,identity;q=0.3",
"Authorization" => "Bearer 123",
"Content-Type" => "application/json",
"Host" => "api.openai.com",
"User-Agent" => "Ruby",
},
).to_return(status: 200, body: "", headers: {})
message =
"#{I18n.t("dashboard.problem.ai_llm_status", { model_name: llm_model.display_name, model_id: llm_model.id })}"

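Note on the stub in the spec above: it pins the exact request body and headers (including User-Agent and Accept-Encoding), so any change to the prompt text or to the HTTP client's defaults will break the match. A looser WebMock sketch that matches only the endpoint is shown below, assuming WebMock is already loaded as the stub above suggests; whether this is strict enough depends on what run_test actually sends, and the response body shape here is assumed rather than taken from the real endpoint:

# Looser alternative: match any POST to the completions endpoint without
# pinning the request body or headers. The JSON response shape is an
# assumption for illustration only.
stub_request(:post, "https://api.openai.com/v1/chat/completions").to_return(
  status: 200,
  body: { choices: [{ message: { content: "2" } }] }.to_json,
  headers: { "Content-Type" => "application/json" },
)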
