Optimize and fix prompt generation #6

Merged

merged 5 commits on Oct 24, 2024
2 changes: 1 addition & 1 deletion .github/workflows/smoke_test.yml
@@ -28,7 +28,7 @@ jobs:
      run: |
        cd src/compute_horde_prompt_gen

-        python3 run.py --output_folder_path output/ --model_name mock --number_of_batches 5 --number_of_prompts_per_batch 20 --uuids uuid1,uuid2,uuid3,uuid4,uuid5
+        python3 run.py --output_folder_path output/ --model_name mock --number_of_prompts_per_batch 20 --uuids uuid1,uuid2,uuid3,uuid4,uuid5

        echo -e "\ngenerated batches:"
        ls
18 changes: 9 additions & 9 deletions src/compute_horde_prompt_gen/prompt.py
@@ -4,17 +4,17 @@

class PromptGeneratingPrompt:
    def random_select(self, arr: list[str], num: int = 5) -> str:
-        random.shuffle(arr)
-        return ", ".join(arr[:num]) + ", etc"
+        return random.choices(arr, k=num)
Contributor:

Suggested change:
-        return random.choices(arr, k=num)
+        return random.sample(arr, k=num)

I think we should not pick the same thing twice.
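To make the distinction concrete (a minimal sketch, not part of the PR): `random.choices` draws with replacement, so the same theme can be selected twice, while `random.sample` draws without replacement.

```python
import random

themes = ["space", "history", "math", "art"]

# choices() samples WITH replacement: duplicates are possible.
print(random.choices(themes, k=3))  # e.g. ['math', 'math', 'art']

# sample() samples WITHOUT replacement: each element appears at most once.
print(random.sample(themes, k=3))   # e.g. ['history', 'art', 'space']
```

Note that `random.sample` raises `ValueError` if `k` exceeds the population size, whereas `random.choices` happily repeats elements.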


-    def generate_prompt(self, short=True) -> str:
-        themes = self.random_select(THEMES, num=3)
+    def random_select_str(self, arr: list[str], num: int = 5) -> str:
+        choices = self.random_select(arr, num)
+        return ", ".join(choices) + ", etc"
+
+    def generate_prompt(self, short=True) -> str:
        if short:
-            return (
-                f"Generate a list of 10 questions or instruct tasks related to the themes of {themes}. "
-                f"Output each prompt on a new line without any extra commentary or special characters."
-            )
+            theme = self.random_select(THEMES, num=1)[0]
+            return f"{theme}"
+        themes = self.random_select_str(THEMES, num=3)

        relevance_level = random.randint(5, 20)
        complexity_level = random.randint(5, 20)
@@ -34,5 +34,5 @@ def generate_prompt(self, short=True) -> str:

    def generate_role(self, short=True) -> str:
        if short:
-            return "You are a prompt engineer tasked with prompts of varying complexity to test the capabilities of a new language model."
+            return "You are my friend helping me study. I say a theme and you ask me a question about it. Questions have to be short but open. Write only the question and nothing more."
        return "You are a prompt engineer tasked with prompts of varying complexity to test the capabilities of a new language model. For each prompt, consider what aspect of the language model's capabilities it is designed to test and ensure that the set of prompts covers a broad spectrum of potential use cases for the language model. Only output the prompts, one per line without any extra commentary. Do not use any special characters or formatting, numbering or styling in the output."
44 changes: 23 additions & 21 deletions src/compute_horde_prompt_gen/run.py
@@ -2,6 +2,7 @@
import os
import logging
import argparse
+from collections import deque

from prompt import PromptGeneratingPrompt
from model import MockModel, Llama3, Phi3
@@ -19,7 +20,10 @@ def generate_prompts(
    max_new_tokens: int = 2000,
    temperature: float = 1.0,
    filepath: str = "prompts.txt",
+    leftover_prompts: deque = None,
Contributor:

Suggested change:
-    leftover_prompts: deque = None,
+    leftover_prompts: deque | None = None,

Please don't hate me for this :D

Contributor (Author):

I... I... :D
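The suggestion only tightens the annotation: since the default is `None`, the parameter's type really is `deque | None`, and the function body already guards with `if leftover_prompts is None`. A small sketch of the idiom (illustrative names, not code from the PR):

```python
from collections import deque

def generate(leftover: deque | None = None) -> deque:
    # Create a fresh deque per call. A mutable default like `deque()` in the
    # signature would be built once at definition time and shared across calls.
    if leftover is None:
        leftover = deque()
    leftover.append("prompt")
    return leftover

print(generate())                    # deque(['prompt'])
print(generate(deque(["carried"])))  # deque(['carried', 'prompt'])
```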

):
+    if leftover_prompts is None:
+        leftover_prompts = deque()
    prompt_generator = PromptGeneratingPrompt()

    i = -1
@@ -38,22 +42,27 @@
        )

        seconds_taken = (datetime.datetime.now() - start_ts).total_seconds()
-        log.info(f"{i=} generation took {seconds_taken:.2f}s")

        new_prompts = []
        for j, sequence in enumerate(sequences):
            generated_prompts = parse_output(sequence)
            log.debug(f"{i=} sequence={j} {generated_prompts=} from {sequence=}")
-            log.info(f"{i=} sequence={j} generated {len(generated_prompts)} prompts")
            new_prompts.extend(generated_prompts)

        # check_prompts_quality(new_prompts)

        # remove any duplicates
        new_prompts = list(set(new_prompts))
+        log.info(
+            f"{i=} generation took {seconds_taken:.2f}s; generated {len(new_prompts)} prompts"
+        )

-        if total_prompts - len(new_prompts) < 0:
+        # Use leftover prompts from previous batch if available
+        while leftover_prompts and len(new_prompts) < total_prompts:
+            new_prompts.append(leftover_prompts.popleft())
+
+        if len(new_prompts) > total_prompts:
+            # Save extra prompts for next batch
+            leftover_prompts.extend(new_prompts[total_prompts:])
Comment on lines +63 to +65

Contributor:

I think you need the original value of total_prompts that was passed into the function - we are subtracting the number of prompts generated from this variable.
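To make the reviewer's point concrete, a small illustrative helper (hypothetical, not from the PR) that keeps the caller's original target under its own name, so later decrements of the loop counter cannot affect the slicing:

```python
from collections import deque

def trim_to_target(new_prompts: list[str], target: int, leftover: deque) -> list[str]:
    # Compare against the caller's original target, not a counter that
    # other code has already decremented.
    if len(new_prompts) > target:
        leftover.extend(new_prompts[target:])
        new_prompts = new_prompts[:target]
    return new_prompts

leftover: deque = deque()
kept = trim_to_target(["a", "b", "c", "d"], 3, leftover)
print(kept, list(leftover))  # ['a', 'b', 'c'] ['d']
```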

            new_prompts = new_prompts[:total_prompts]

        total_prompts -= len(new_prompts)
@@ -62,6 +71,8 @@
        if total_prompts == 0:
            break

+    return leftover_prompts

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Generate prompts")
@@ -74,19 +85,19 @@ def generate_prompts(
    parser.add_argument(
        "--batch_size",
        type=int,
-        default=20,
+        default=262,  # on an A6000 we want 240 prompts generated per file, but not all results are valid
        help="Batch size - number of prompts given as input per generation request",
    )
    parser.add_argument(
        "--num_return_sequences",
        type=int,
-        default=5,
+        default=1,  # better to generate as many prompts as possible on different themes
        help="Number of return sequences outputted for each prompt given as input",
    )
    parser.add_argument(
        "--max_new_tokens",
        type=int,
-        default=500,
+        default=40,  # 40 new tokens is enough for a reasonable-length prompt; 30 caused too many cut-off prompts
        help="Max new tokens",
    )
    parser.add_argument(
@@ -108,16 +119,10 @@ def generate_prompts(
default="./saved_models/",
help="Path to load the model and tokenizer from",
)
parser.add_argument(
"--number_of_batches",
type=int,
default=None,
help="Number of batches to generate",
)
parser.add_argument(
"--number_of_prompts_per_batch",
type=int,
required=True,
default=240,
help="Number of prompts per uuid batch",
)
parser.add_argument(
Expand All @@ -137,11 +142,6 @@ def generate_prompts(

    uuids = args.uuids.split(",")

-    if args.number_of_batches:
-        assert (
-            len(uuids) == args.number_of_batches
-        ), "Number of uuids should be equal to number of batches requested"
-
    model_path = os.path.join(args.model_path, args.model_name)
    if args.model_name == "mock":
        model = MockModel()
@@ -158,16 +158,18 @@
    else:
        raise ValueError(f"Invalid model name: {args.model_name}")

+    leftover_prompts = None
    for uuid in uuids:
        start_ts = datetime.datetime.now()
-        generate_prompts(
+        leftover_prompts = generate_prompts(
            model,
            total_prompts=args.number_of_prompts_per_batch,
            batch_size=args.batch_size,
            num_return_sequences=args.num_return_sequences,
            max_new_tokens=args.max_new_tokens,
            temperature=args.temperature,
            filepath=os.path.join(args.output_folder_path, f"prompts_{uuid}.txt"),
+            leftover_prompts=leftover_prompts,
        )
        seconds_taken = (datetime.datetime.now() - start_ts).total_seconds()
        log.info(
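The main loop above threads one deque through successive batches, so surplus prompts from one uuid's file seed the next. A stripped-down sketch of that flow (mocked generation, illustrative names, not code from the PR):

```python
from collections import deque

def fill_batch(target: int, generated: list[str],
               leftover: deque | None = None) -> tuple[list[str], deque]:
    # Mirrors the PR's flow: top up from leftovers, stash any surplus.
    if leftover is None:
        leftover = deque()
    batch = list(generated)
    while leftover and len(batch) < target:
        batch.append(leftover.popleft())
    if len(batch) > target:
        leftover.extend(batch[target:])
        batch = batch[:target]
    return batch, leftover

leftover = None
for generated in (["p1", "p2", "p3", "p4"], ["p5"]):
    batch, leftover = fill_batch(3, generated, leftover)
    print(batch, list(leftover))
# ['p1', 'p2', 'p3'] ['p4']
# ['p5', 'p4'] []
```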
26 changes: 19 additions & 7 deletions src/compute_horde_prompt_gen/utils.py
@@ -8,22 +8,34 @@

def clean_line(line: str) -> str:
    line = line.strip()
+    head, sep, tail = line.partition("<|")
+    if head:
+        line = head.strip()
+    else:
+        # if we started with a tag, we assume the prompt is inside it
+        line = tail.partition("|>")[2].partition("<|")[0].strip()
    # remove list numbering if present
    line = re.sub(r"^\s*\d+\.?\s*", "", line)
+    # strip quotations
+    line = line.strip("\"'")
Comment on lines +19 to +20

Contributor:

Won't that mess up lines where one quote mark is at the start/end of the line and the other is in the middle? The strip will only remove the quote at the start/end, not the one in the middle.

Contributor (Author):

We don't care - we only keep lines that are a single-line prompt ending with "?" - if a result is broken or "not perfect", it gets ignored.

    return line


def parse_output(output: str) -> list[str]:
    # split into lines and clean them
    lines = output.split("\n")
-    lines = [clean_line(line) for line in lines]
+    for line in lines:
+        cleaned_line = clean_line(line)
+        # skip lines that are too short or too long, or that don't end with "?"
+        # in most cases that is just the first line
+        if (
+            len(cleaned_line) > 10
+            and len(cleaned_line) < 300
+            and cleaned_line.endswith("?")
+        ):
+            return [cleaned_line]

-    # filter out null lines or prompts that are too short or long
-    lines = [line for line in lines if (len(line) > 10 and len(line) < 300)]
-
-    # skip first line as that's frequently broken (e.g. "Here are the prompts:")
-    # skip last line as it might not be completely generated
-    return lines[1:-1]
+    return []
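A quick demonstration of the new single-prompt contract (a self-contained sketch; `clean_line` is condensed here and the sample text is made up):

```python
import re

def clean_line(line: str) -> str:
    # Condensed copy of the PR's clean_line, for demonstration only.
    line = line.strip()
    head, _, tail = line.partition("<|")
    if head:
        line = head.strip()
    else:
        line = tail.partition("|>")[2].partition("<|")[0].strip()
    line = re.sub(r"^\s*\d+\.?\s*", "", line)  # drop list numbering
    return line.strip("\"'")                   # strip surrounding quotes

def parse_output(output: str) -> list[str]:
    # Return the first plausible single-line question, or nothing.
    for line in output.split("\n"):
        cleaned = clean_line(line)
        if 10 < len(cleaned) < 300 and cleaned.endswith("?"):
            return [cleaned]
    return []

raw = 'Here are the prompts:\n1. "What drives ocean tides?"\ntruncated trail'
print(parse_output(raw))  # ['What drives ocean tides?']
```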


def check_prompts_quality(prompts: list[str]):