Skip to content

Commit

Permalink
FEATURE: Select stop sequences from triage script (#1010)
Browse files Browse the repository at this point in the history
  • Loading branch information
romanrizzi authored Dec 6, 2024
1 parent 7ebbcd2 commit 085dde7
Show file tree
Hide file tree
Showing 5 changed files with 31 additions and 3 deletions.
3 changes: 3 additions & 0 deletions config/locales/client.en.yml
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,9 @@ en:
max_post_tokens:
label: "Max Post Tokens"
description: "The maximum number of tokens to scan using LLM triage"
stop_sequences:
label: "Stop Sequences"
description: "Instruct the model to halt token generation when it encounters one of these values"
search_for_text:
label: "Search for text"
description: "If the following text appears in the LLM reply, apply these actions"
Expand Down
4 changes: 4 additions & 0 deletions discourse_automation/llm_triage.rb
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
field :system_prompt, component: :message, required: false
field :search_for_text, component: :text, required: true
field :max_post_tokens, component: :text
field :stop_sequences, component: :text_list, required: false
field :model,
component: :choices,
required: true,
Expand Down Expand Up @@ -55,6 +56,8 @@

max_post_tokens = nil if max_post_tokens <= 0

stop_sequences = fields.dig("stop_sequences", "value")

if post.topic.private_message?
include_personal_messages = fields.dig("include_personal_messages", "value")
next if !include_personal_messages
Expand Down Expand Up @@ -88,6 +91,7 @@
flag_post: flag_post,
flag_type: flag_type.to_s.to_sym,
max_post_tokens: max_post_tokens,
stop_sequences: stop_sequences,
automation: self.automation,
)
rescue => e
Expand Down
4 changes: 3 additions & 1 deletion lib/automation/llm_triage.rb
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,8 @@ def self.handle(
flag_post: nil,
flag_type: nil,
automation: nil,
max_post_tokens: nil
max_post_tokens: nil,
stop_sequences: nil
)
if category_id.blank? && tags.blank? && canned_reply.blank? && hide_topic.blank? &&
flag_post.blank?
Expand All @@ -42,6 +43,7 @@ def self.handle(
temperature: 0,
max_tokens: 700, # ~500 words
user: Discourse.system_user,
stop_sequences: stop_sequences,
feature_name: "llm_triage",
feature_context: {
automation_id: automation&.id,
Expand Down
5 changes: 3 additions & 2 deletions lib/completions/endpoints/canned_response.rb
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ def normalize_model_params(model_params)
model_params
end

attr_reader :responses, :completions, :dialect
attr_reader :responses, :completions, :dialect, :model_params

def prompt_messages
dialect.prompt.messages
Expand All @@ -26,12 +26,13 @@ def prompt_messages
def perform_completion!(
dialect,
_user,
_model_params,
model_params,
feature_name: nil,
feature_context: nil,
partial_tool_calls: false
)
@dialect = dialect
@model_params = model_params
response = responses[completions]
if response.nil?
raise CANNED_RESPONSE_ERROR,
Expand Down
18 changes: 18 additions & 0 deletions spec/lib/modules/automation/llm_triage_spec.rb
Original file line number Diff line number Diff line change
Expand Up @@ -180,4 +180,22 @@ def triage(**args)
expect(triage_prompt.messages.last[:upload_ids]).to contain_exactly(post_upload.id)
end
end

# Regression test for the stop_sequences automation field: the value supplied
# to the triage script must be forwarded through LlmTriage.handle and into the
# model params of the underlying completion call.
it "includes stop_sequences in the completion call" do
sequences = %w[GOOD BAD]

# with_prepared_responses swaps in a canned endpoint that replies "bad" and
# yields it as `spy`; the canned endpoint records the model params it was
# given, which lets us inspect what reached the completion layer.
DiscourseAi::Completions::Llm.with_prepared_responses(["bad"]) do |spy|
triage(
post: post,
model: "custom:#{llm_model.id}",
system_prompt: "test %%POST%%",
search_for_text: "bad",
flag_post: true,
automation: nil,
stop_sequences: sequences,
)

# Order-agnostic check that the exact sequences arrived intact.
expect(spy.model_params[:stop_sequences]).to contain_exactly(*sequences)
end
end
end

0 comments on commit 085dde7

Please sign in to comment.