From 664c7dfb08f0dae6eefbcf76e0a58ad9330a4a52 Mon Sep 17 00:00:00 2001
From: Michal Zukowski
Date: Mon, 23 Sep 2024 23:13:43 +0200
Subject: [PATCH 1/5] Optimize and fix quality of prompt generation

---
 src/compute_horde_prompt_gen/prompt.py | 16 +++++++++-------
 src/compute_horde_prompt_gen/run.py    | 10 +++++-----
 src/compute_horde_prompt_gen/utils.py  | 24 ++++++++++++++++--------
 3 files changed, 30 insertions(+), 20 deletions(-)

diff --git a/src/compute_horde_prompt_gen/prompt.py b/src/compute_horde_prompt_gen/prompt.py
index eed919b..72b8c20 100644
--- a/src/compute_horde_prompt_gen/prompt.py
+++ b/src/compute_horde_prompt_gen/prompt.py
@@ -4,17 +4,19 @@ class PromptGeneratingPrompt:
     def random_select(self, arr: list[str], num: int = 5) -> str:
-        random.shuffle(arr)
-        return ", ".join(arr[:num]) + ", etc"
+        return random.choices(arr, k=num)
 
-    def generate_prompt(self, short=True) -> str:
-        themes = self.random_select(THEMES, num=3)
+    def random_select_str(self, arr: list[str], num: int = 5) -> str:
+        choices = self.random_select(arr, num)
+        return ", ".join(choices) + ", etc"
 
+    def generate_prompt(self, short=True) -> str:
         if short:
+            theme = self.random_select(THEMES, num=1)[0]
             return (
-                f"Generate a list of 10 questions or instruct tasks related to the themes of {themes}. "
-                f"Output each prompt on a new line without any extra commentary or special characters."
+                f"{theme}"
             )
+        themes = self.random_select_str(arr, num=3)
 
         relevance_level = random.randint(5, 20)
         complexity_level = random.randint(5, 20)
@@ -34,5 +36,5 @@ def generate_prompt(self, short=True) -> str:
 
     def generate_role(self, short=True) -> str:
         if short:
-            return "You are a prompt engineer tasked with prompts of varying complexity to test the capabilities of a new language model."
+            return "You are my friend helping me study. I say a theme and you ask me a question about it. Questions have to be short but open. Write only the question and nothing more."
         return "You are a prompt engineer tasked with prompts of varying complexity to test the capabilities of a new language model. For each prompt, consider what aspect of the language model's capabilities it is designed to test and ensure that the set of prompts covers a broad spectrum of potential use cases for the language model. Only output the prompts, one per line without any extra commentary. Do not use any special characters or formatting, numbering or styling in the output."
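For context, here is a minimal sketch of how the reworked short mode is intended to be driven (illustrative only, not part of the patch: the chat-message shape below is an assumption about how the model wrapper in model.py is fed, and `THEMES` is the theme list already imported by prompt.py):

```python
# Illustrative sketch - not part of the patch. The chat-message shape is an
# assumption, not the repo's actual model API.
from prompt import PromptGeneratingPrompt

generator = PromptGeneratingPrompt()
role = generator.generate_role(short=True)      # fixed system message: ask one short, open question
prompt = generator.generate_prompt(short=True)  # a single randomly drawn theme

# One input per theme; each input is expected to yield one candidate question.
messages = [
    {"role": "system", "content": role},
    {"role": "user", "content": prompt},
]
```

Note that `random.choices` samples with replacement, so the same theme can be drawn for several inputs in a batch; duplicate questions are removed later in run.py via `list(set(new_prompts))`.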
diff --git a/src/compute_horde_prompt_gen/run.py b/src/compute_horde_prompt_gen/run.py
index d2ee050..3c7723b 100644
--- a/src/compute_horde_prompt_gen/run.py
+++ b/src/compute_horde_prompt_gen/run.py
@@ -38,22 +38,22 @@ def generate_prompts(
         )
         seconds_taken = (datetime.datetime.now() - start_ts).total_seconds()
-        log.info(f"{i=} generation took {seconds_taken:.2f}s")
 
         new_prompts = []
         for j, sequence in enumerate(sequences):
             generated_prompts = parse_output(sequence)
-            log.debug(f"{i=} sequence={j} {generated_prompts=} from {sequence=}")
-
-            log.info(f"{i=} sequence={j} generated {len(generated_prompts)} prompts")
             new_prompts.extend(generated_prompts)
 
         # check_prompts_quality(new_prompts)
 
         # remove any duplicates
         new_prompts = list(set(new_prompts))
-
+        log.info(f"{i=} generation took {seconds_taken:.2f}s; generated {len(new_prompts)} prompts")
         if total_prompts - len(new_prompts) < 0:
+            # one might want to optimize here and save additional prompts for the next batch,
+            # but as parametrized each run produces on average only 10 additional prompts,
+            # so to fill 240 we would need 24 runs of ~10s each to get one batch for free -
+            # saving 10s, about a 4% gain - not worth it :)
             new_prompts = new_prompts[:total_prompts]
 
         total_prompts -= len(new_prompts)

diff --git a/src/compute_horde_prompt_gen/utils.py b/src/compute_horde_prompt_gen/utils.py
index 5825cab..7b623b9 100644
--- a/src/compute_horde_prompt_gen/utils.py
+++ b/src/compute_horde_prompt_gen/utils.py
@@ -8,22 +8,30 @@
 def clean_line(line: str) -> str:
     line = line.strip()
+    head, sep, tail = line.partition('<|')
+    if head:
+        line = head.strip()
+    else:
+        # if the line started with a tag, we assume the prompt is inside it
+        line = tail.partition('|>')[2].partition('<|')[0].strip()
     # remove list numbering if present
     line = re.sub(r"^\s*\d+\.?\s*", "", line)
+    # strip quotations
+    line = line.strip('"\'')
     return line
 
 
 def parse_output(output: str) -> list[str]:
     # split into lines and clean them
     lines = output.split("\n")
-    lines = [clean_line(line) for line in lines]
-
-    # filter out null lines or prompts that are too short or long
-    lines = [line for line in lines if (len(line) > 10 and len(line) < 300)]
-
-    # skip first line as that's frequently broken (i.e. "Here are the prompts:")
-    # skip last line as it might not be comletely generated
-    return lines[1:-1]
+    for line in lines:
+        clean_line = clean_line(line)
+        # we skip the line if it is too short, too long, or does not end with ?
+        # in most cases that is just the first line
+        if len(clean_line) > 10 and len(clean_line) < 300 and line.endswith('?'):
+            return [line]
+
+    return []
 
 
 def check_prompts_quality(prompts: list[str]):

From d1d56aa79deeef789ca8c56a682145d576f43e3d Mon Sep 17 00:00:00 2001
From: Michal Zukowski
Date: Mon, 23 Sep 2024 23:22:24 +0200
Subject: [PATCH 2/5] Linter fixes

---
 src/compute_horde_prompt_gen/prompt.py |  6 ++----
 src/compute_horde_prompt_gen/run.py    | 23 +++++++----------------
 src/compute_horde_prompt_gen/utils.py  | 10 +++++-----
 3 files changed, 14 insertions(+), 25 deletions(-)

diff --git a/src/compute_horde_prompt_gen/prompt.py b/src/compute_horde_prompt_gen/prompt.py
index 72b8c20..96e18a5 100644
--- a/src/compute_horde_prompt_gen/prompt.py
+++ b/src/compute_horde_prompt_gen/prompt.py
@@ -13,10 +13,8 @@ def random_select_str(self, arr: list[str], num: int = 5) -> str:
     def generate_prompt(self, short=True) -> str:
         if short:
             theme = self.random_select(THEMES, num=1)[0]
-            return (
-                f"{theme}"
-            )
-        themes = self.random_select_str(arr, num=3)
+            return f"{theme}"
+        themes = self.random_select_str(THEMES, num=3)
 
         relevance_level = random.randint(5, 20)
         complexity_level = random.randint(5, 20)

diff --git a/src/compute_horde_prompt_gen/run.py b/src/compute_horde_prompt_gen/run.py
index 3c7723b..328a8a8 100644
--- a/src/compute_horde_prompt_gen/run.py
+++ b/src/compute_horde_prompt_gen/run.py
@@ -48,7 +48,9 @@ def generate_prompts(
         # remove any duplicates
         new_prompts = list(set(new_prompts))
-        log.info(f"{i=} generation took {seconds_taken:.2f}s; generated {len(new_prompts)} prompts")
+        log.info(
+            f"{i=} generation took {seconds_taken:.2f}s; generated {len(new_prompts)} prompts"
+        )
         if total_prompts - len(new_prompts) < 0:
             # one might want to optimize here and save additional prompts for the next batch,
             # but as parametrized each run produces on average only 10 additional prompts,
@@ -74,19 +76,19 @@
     parser.add_argument(
         "--batch_size",
         type=int,
-        default=20,
+        default=262,  # on an A6000 we want 240 prompts generated per output file, but not all results are valid
         help="Batch size - number of prompts given as input per generation request",
     )
     parser.add_argument(
         "--num_return_sequences",
         type=int,
-        default=5,
+        default=1,  # better to generate as many prompts as possible across different themes
         help="Number of return sequences outputted for each prompt given as input",
     )
     parser.add_argument(
         "--max_new_tokens",
         type=int,
-        default=500,
+        default=40,  # 40 new tokens is enough for a reasonable-length prompt - 30 caused too many cut-off prompts
         help="Max new tokens",
     )
     parser.add_argument(
@@ -108,16 +110,10 @@
         default="./saved_models/",
         help="Path to load the model and tokenizer from",
     )
-    parser.add_argument(
-        "--number_of_batches",
-        type=int,
-        default=None,
-        help="Number of batches to generate",
-    )
     parser.add_argument(
         "--number_of_prompts_per_batch",
         type=int,
-        required=True,
+        default=240,
         help="Number of prompts per uuid batch",
     )
     parser.add_argument(
@@ -137,11 +133,6 @@
 
     uuids = args.uuids.split(",")
 
-    if args.number_of_batches:
-        assert (
-            len(uuids) == args.number_of_batches
-        ), "Number of uuids should be equal to number of batches requested"
-
     model_path = os.path.join(args.model_path, args.model_name)
     if args.model_name == "mock":
         model = MockModel()

diff --git a/src/compute_horde_prompt_gen/utils.py b/src/compute_horde_prompt_gen/utils.py
index 7b623b9..2511ac7 100644
--- a/src/compute_horde_prompt_gen/utils.py
+++ b/src/compute_horde_prompt_gen/utils.py
@@ -8,16 +8,16 @@ def clean_line(line: str) -> str:
     line = line.strip()
-    head, sep, tail = line.partition('<|')
+    head, sep, tail = line.partition("<|")
     if head:
         line = head.strip()
     else:
         # if the line started with a tag, we assume the prompt is inside it
-        line = tail.partition('|>')[2].partition('<|')[0].strip()
+        line = tail.partition("|>")[2].partition("<|")[0].strip()
     # remove list numbering if present
     line = re.sub(r"^\s*\d+\.?\s*", "", line)
     # strip quotations
-    line = line.strip('"\'')
+    line = line.strip("\"'")
     return line
 
@@ -25,10 +25,10 @@ def parse_output(output: str) -> list[str]:
     # split into lines and clean them
     lines = output.split("\n")
     for line in lines:
-        clean_line = clean_line(line)
+        cleaned_line = clean_line(line)
         # we skip the line if it is too short, too long, or does not end with ?
         # in most cases that is just the first line
-        if len(clean_line) > 10 and len(clean_line) < 300 and line.endswith('?'):
+        if len(cleaned_line) > 10 and len(cleaned_line) < 300 and line.endswith("?"):
             return [line]
 
     return []

From 645b6d627769c9f78285054db40c2a57455f55d9 Mon Sep 17 00:00:00 2001
From: Michal Zukowski
Date: Mon, 23 Sep 2024 23:27:20 +0200
Subject: [PATCH 3/5] Fix smoke test after dropping --number_of_batches

---
 .github/workflows/smoke_test.yml      | 2 +-
 src/compute_horde_prompt_gen/utils.py | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/smoke_test.yml b/.github/workflows/smoke_test.yml
index 216fe3b..2a59852 100644
--- a/.github/workflows/smoke_test.yml
+++ b/.github/workflows/smoke_test.yml
@@ -28,7 +28,7 @@ jobs:
       run: |
         cd src/compute_horde_prompt_gen
 
-        python3 run.py --output_folder_path output/ --model_name mock --number_of_batches 5 --number_of_prompts_per_batch 20 --uuids uuid1,uuid2,uuid3,uuid4,uuid5
+        python3 run.py --output_folder_path output/ --model_name mock --number_of_prompts_per_batch 20 --uuids uuid1,uuid2,uuid3,uuid4,uuid5
 
         echo -e "\ngenerated batches:"
         ls

diff --git a/src/compute_horde_prompt_gen/utils.py b/src/compute_horde_prompt_gen/utils.py
index 2511ac7..03f7712 100644
--- a/src/compute_horde_prompt_gen/utils.py
+++ b/src/compute_horde_prompt_gen/utils.py
@@ -28,8 +28,8 @@ def parse_output(output: str) -> list[str]:
         cleaned_line = clean_line(line)
         # we skip the line if it is too short, too long, or does not end with ?
         # in most cases that is just the first line
-        if len(cleaned_line) > 10 and len(cleaned_line) < 300 and line.endswith("?"):
-            return [line]
+        if len(cleaned_line) > 10 and len(cleaned_line) < 300 and cleaned_line.endswith("?"):
+            return [cleaned_line]
 
     return []

From a99fa7891872505a463ec4f9dfc57fdb3a172f05 Mon Sep 17 00:00:00 2001
From: Michal Zukowski
Date: Tue, 24 Sep 2024 10:56:01 +0200
Subject: [PATCH 4/5] Add use of leftover_prompts

---
 src/compute_horde_prompt_gen/run.py   | 24 ++++++++++++++++++------
 src/compute_horde_prompt_gen/utils.py |  6 +++++-
 2 files changed, 23 insertions(+), 7 deletions(-)

diff --git a/src/compute_horde_prompt_gen/run.py b/src/compute_horde_prompt_gen/run.py
index 328a8a8..b2ccfd3 100644
--- a/src/compute_horde_prompt_gen/run.py
+++ b/src/compute_horde_prompt_gen/run.py
@@ -2,6 +2,7 @@
 import os
 import logging
 import argparse
+from collections import deque
 
 from prompt import PromptGeneratingPrompt
 from model import MockModel, Llama3, Phi3
@@ -19,7 +20,10 @@ def generate_prompts(
     max_new_tokens: int = 2000,
     temperature: float = 1.0,
     filepath: str = "prompts.txt",
+    leftover_prompts: deque = None,
 ):
+    if leftover_prompts is None:
+        leftover_prompts = deque()
     prompt_generator = PromptGeneratingPrompt()
     i = -1
@@ -51,11 +55,15 @@ def generate_prompts(
         log.info(
             f"{i=} generation took {seconds_taken:.2f}s; generated {len(new_prompts)} prompts"
         )
-        if total_prompts - len(new_prompts) < 0:
-            # one might want to optimize here and save additional prompts for the next batch,
-            # but as parametrized each run produces on average only 10 additional prompts,
-            # so to fill 240 we would need 24 runs of ~10s each to get one batch for free -
-            # saving 10s, about a 4% gain - not worth it :)
+
+        # Use leftover prompts from previous batch if available
+        while leftover_prompts and total_prompts > 0:
+            new_prompts.append(leftover_prompts.popleft())
+            total_prompts -= 1
+
+        if len(new_prompts) > total_prompts:
+            # Save extra prompts for next batch
+            leftover_prompts.extend(new_prompts[total_prompts:])
             new_prompts = new_prompts[:total_prompts]
 
         total_prompts -= len(new_prompts)
@@ -64,6 +72,8 @@
         if total_prompts == 0:
             break
 
+    return leftover_prompts
+
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Generate prompts")
@@ -149,9 +159,10 @@
     else:
         raise ValueError(f"Invalid model name: {args.model_name}")
 
+    leftover_prompts = None
     for uuid in uuids:
         start_ts = datetime.datetime.now()
-        generate_prompts(
+        leftover_prompts = generate_prompts(
             model,
             total_prompts=args.number_of_prompts_per_batch,
             batch_size=args.batch_size,
@@ -159,6 +170,7 @@
             max_new_tokens=args.max_new_tokens,
             temperature=args.temperature,
             filepath=os.path.join(args.output_folder_path, f"prompts_{uuid}.txt"),
+            leftover_prompts=leftover_prompts,
         )
         seconds_taken = (datetime.datetime.now() - start_ts).total_seconds()
         log.info(

diff --git a/src/compute_horde_prompt_gen/utils.py b/src/compute_horde_prompt_gen/utils.py
index 03f7712..229c26a 100644
--- a/src/compute_horde_prompt_gen/utils.py
+++ b/src/compute_horde_prompt_gen/utils.py
@@ -28,7 +28,11 @@ def parse_output(output: str) -> list[str]:
         cleaned_line = clean_line(line)
         # we skip the line if it is too short, too long, or does not end with ?
         # in most cases that is just the first line
-        if len(cleaned_line) > 10 and len(cleaned_line) < 300 and cleaned_line.endswith("?"):
+        if (
+            len(cleaned_line) > 10
+            and len(cleaned_line) < 300
+            and cleaned_line.endswith("?")
+        ):
             return [cleaned_line]
 
     return []

From e5d448b2f6f39d6ac8b0f40d828eaafb3cc009df Mon Sep 17 00:00:00 2001
From: Michal Zukowski
Date: Thu, 26 Sep 2024 13:01:36 +0200
Subject: [PATCH 5/5] Fix handling leftover prompts

---
 src/compute_horde_prompt_gen/run.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/compute_horde_prompt_gen/run.py b/src/compute_horde_prompt_gen/run.py
index b2ccfd3..0034b0b 100644
--- a/src/compute_horde_prompt_gen/run.py
+++ b/src/compute_horde_prompt_gen/run.py
@@ -57,9 +57,8 @@ def generate_prompts(
         )
 
         # Use leftover prompts from previous batch if available
-        while leftover_prompts and total_prompts > 0:
+        while leftover_prompts and len(new_prompts) < total_prompts:
             new_prompts.append(leftover_prompts.popleft())
-            total_prompts -= 1
 
         if len(new_prompts) > total_prompts:
             # Save extra prompts for next batch
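To make the cumulative effect of the utils.py changes concrete, here is a small illustrative example of the final parse_output behavior (the sample model output below is invented, not from a real run):

```python
# Illustrative only - the sample output is invented.
from utils import parse_output

raw = (
    "<|assistant|>Here is your question:\n"
    '1. "What causes the seasons to change on Earth?"\n'
    "And some trailing commentary"
)
# clean_line() strips the <|...|> tag, the list numbering, and the quotes;
# parse_output() then returns the first cleaned line that is 11-299 characters
# long and ends with "?", or [] if no line qualifies.
print(parse_output(raw))
# -> ['What causes the seasons to change on Earth?']
```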
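And a simplified sketch of the corrected carry-over logic from patches 4 and 5, modeling one fill step per uuid batch (illustrative only; the numbers are made up, and the real loop also decrements the remaining total_prompts and keeps regenerating until the file is full):

```python
# Illustrative only - generation is stubbed out; fill_batch is a hypothetical helper
# that models just the leftover top-up and surplus-stash step from run.py.
from collections import deque

def fill_batch(generated: list[str], total_prompts: int, leftover_prompts: deque) -> list[str]:
    new_prompts = list(generated)
    # top up from the previous batch's surplus (patch 5: no total_prompts mutation here)
    while leftover_prompts and len(new_prompts) < total_prompts:
        new_prompts.append(leftover_prompts.popleft())
    if len(new_prompts) > total_prompts:
        # stash the surplus for the next batch
        leftover_prompts.extend(new_prompts[total_prompts:])
        new_prompts = new_prompts[:total_prompts]
    return new_prompts

leftovers = deque()
batch1 = fill_batch([f"q{k}?" for k in range(250)], total_prompts=240, leftover_prompts=leftovers)
assert len(batch1) == 240 and len(leftovers) == 10  # 10 surplus prompts carried over
batch2 = fill_batch([f"r{k}?" for k in range(235)], total_prompts=240, leftover_prompts=leftovers)
assert len(batch2) == 240 and len(leftovers) == 5   # leftovers topped up the short batch
```

The patch 5 fix matters because the patch 4 version mutated `total_prompts` while draining the deque, which skewed the subsequent surplus/truncation arithmetic; comparing `len(new_prompts)` against an unchanged target keeps the accounting in one place.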