diff --git a/app/services/problem_check/ai_llm_status.rb b/app/services/problem_check/ai_llm_status.rb
index 3f365f5a5..5e9f7ba9e 100644
--- a/app/services/problem_check/ai_llm_status.rb
+++ b/app/services/problem_check/ai_llm_status.rb
@@ -12,7 +12,6 @@ def call
 
   def llm_errors
     return [] if !SiteSetting.discourse_ai_enabled
-
     LlmModel.in_use.find_each.filter_map do |model|
       try_validate(model) { validator.run_test(model) }
     end
@@ -20,7 +19,6 @@ def llm_errors
 
   def try_validate(model, &blk)
     begin
-      raise({ message: "Forced error for testing" }.to_json) if Rails.env.test?
       blk.call
       nil
     rescue => e
diff --git a/spec/services/problem_check/ai_llm_status_spec.rb b/spec/services/problem_check/ai_llm_status_spec.rb
index 36c81a2ea..d563f8d5e 100644
--- a/spec/services/problem_check/ai_llm_status_spec.rb
+++ b/spec/services/problem_check/ai_llm_status_spec.rb
@@ -5,8 +5,10 @@
 RSpec.describe ProblemCheck::AiLlmStatus do
   subject(:check) { described_class.new }
 
+  fab!(:llm_model)
+
   before do
-    assign_fake_provider_to(:ai_summarization_model)
+    SiteSetting.ai_summarization_model = "custom:#{llm_model.id}"
     SiteSetting.ai_summarization_enabled = true
   end
 
@@ -17,11 +19,22 @@
 
   context "with discourse-ai plugin enabled for the site" do
-    let(:llm_model) { LlmModel.in_use.first }
-
     before { SiteSetting.discourse_ai_enabled = true }
 
     it "returns a problem with an LLM model" do
+      stub_request(:post, "https://api.openai.com/v1/chat/completions").with(
+        body:
+          "{\"model\":\"gpt-4-turbo\",\"messages\":[{\"role\":\"system\",\"content\":\"You are a helpful bot\"},{\"role\":\"user\",\"content\":\"How much is 1 + 1?\"}]}",
+        headers: {
+          "Accept" => "*/*",
+          "Accept-Encoding" => "gzip;q=1.0,deflate;q=0.6,identity;q=0.3",
+          "Authorization" => "Bearer 123",
+          "Content-Type" => "application/json",
+          "Host" => "api.openai.com",
+          "User-Agent" => "Ruby",
+        },
+      ).to_return(status: 200, body: "", headers: {})
       message = "#{I18n.t("dashboard.problem.ai_llm_status", { model_name: llm_model.display_name, model_id: llm_model.id })}"