
Commit

upload low-code llm codes
dawnmsg authored Apr 20, 2023
1 parent 4598ad5 commit 91475b3
Showing 15 changed files with 941 additions and 0 deletions.
15 changes: 15 additions & 0 deletions LowCodeLLM/Dockerfile
@@ -0,0 +1,15 @@
FROM ubuntu:22.04

# Install git, Python 3, pip, and supervisor
RUN apt-get -y update && \
    apt-get install -y git python3.11 python3-pip supervisor
RUN pip3 install --upgrade pip setuptools
RUN ln -s /usr/bin/python3 /usr/bin/python
COPY src/requirements.txt requirements.txt
RUN pip3 install -r requirements.txt

COPY src /app/src

WORKDIR /app/src
ENV WORKERS=2
CMD ["supervisord", "-c", "supervisord.conf"]
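Note: the supervisord.conf referenced by the CMD line is part of this commit but not shown in this excerpt. A minimal sketch of what such a config could look like, assuming the Flask app is served by gunicorn (suggested by the gunicorn.error logger in app.py) and that WORKERS controls the worker count; the command and bind address are placeholders, not taken from the repository:

[supervisord]
nodaemon=true

[program:lowcode-llm]
; hypothetical command; the actual one lives in the committed supervisord.conf
command=gunicorn -w %(ENV_WORKERS)s -b 0.0.0.0:8888 app:app
directory=/app/src
autorestart=true
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0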
59 changes: 59 additions & 0 deletions LowCodeLLM/src/app.py
@@ -0,0 +1,59 @@
import os
from flask import Flask, request
from flask_cors import CORS, cross_origin
from lowCodeLLM import lowCodeLLM
from flask.logging import default_handler
import logging

app = Flask('lowcode-llm', static_url_path='', template_folder='')
app.debug = True
llm = lowCodeLLM()
gunicorn_logger = logging.getLogger('gunicorn.error')
app.logger = gunicorn_logger
logging_format = logging.Formatter(
    '%(asctime)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s')
default_handler.setFormatter(logging_format)

@app.route('/api/get_workflow', methods=['POST'])
@cross_origin()
def get_workflow():
    """Generate a workflow (SOP) for the given task prompt."""
    try:
        request_content = request.get_json()
        task_prompt = request_content['task_prompt']
        workflow = llm.get_workflow(task_prompt)
        return workflow, 200
    except Exception as e:
        app.logger.error(
            'failed to get_workflow, msg:%s, request data:%s' % (str(e), request.get_data(as_text=True)))
        return {'errmsg': str(e)}, 500

@app.route('/api/extend_workflow', methods=['POST'])
@cross_origin()
def extend_workflow():
    """Expand one step of the current workflow into a more detailed sub-workflow."""
    try:
        request_content = request.get_json()
        task_prompt = request_content['task_prompt']
        current_workflow = request_content['current_workflow']
        step = request_content['step']
        sub_workflow = llm.extend_workflow(task_prompt, current_workflow, step)
        return sub_workflow, 200
    except Exception as e:
        app.logger.error(
            'failed to extend_workflow, msg:%s, request data:%s' % (str(e), request.get_data(as_text=True)))
        return {'errmsg': str(e)}, 500

@app.route('/api/execute', methods=['POST'])
@cross_origin()
def execute():
    """Run the confirmed workflow against the current user input and chat history."""
    try:
        request_content = request.get_json()
        task_prompt = request_content['task_prompt']
        confirmed_workflow = request_content['confirmed_workflow']
        curr_input = request_content['curr_input']
        history = request_content['history']
        response = llm.execute(task_prompt, confirmed_workflow, history, curr_input)
        return response, 200
    except Exception as e:
        app.logger.error(
            'failed to execute, msg:%s, request data:%s' % (str(e), request.get_data(as_text=True)))
        return {'errmsg': str(e)}, 500
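All three endpoints take a JSON body; only the field names come from the handlers above, while the host, port, and example payload values below are assumptions for illustration. A rough client sketch:

import requests

BASE = "http://localhost:8888/api"  # host and port are placeholders, not defined in this excerpt

# /api/get_workflow expects a 'task_prompt' and returns the generated workflow (SOP)
workflow = requests.post(BASE + "/get_workflow",
                         json={"task_prompt": "Write an essay about climate change"}).text

# /api/execute additionally needs the user-confirmed workflow, the chat history, and the latest user input
answer = requests.post(BASE + "/execute",
                       json={"task_prompt": "Write an essay about climate change",
                             "confirmed_workflow": workflow,
                             "history": [],
                             "curr_input": "Please keep it under 500 words."}).text
print(answer)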
41 changes: 41 additions & 0 deletions LowCodeLLM/src/executingLLM.py
@@ -0,0 +1,41 @@
from openAIWrapper import OpenAIWrapper

EXECUTING_LLM_PREFIX = """Executing LLM is designed to provide outstanding responses.
Executing LLM will be given an overall task as the background of the conversation between the Executing LLM and the human.
When providing a response, Executing LLM MUST STRICTLY follow the provided standard operating procedure (SOP).
The SOP is formatted as:
'''
STEP 1: [step name][step descriptions][[[if 'condition1'][Jump to STEP]], [[if 'condition2'][Jump to STEP]], ...]
STEP 2: [step name][step descriptions][[[if 'condition1'][Jump to STEP]], [[if 'condition2'][Jump to STEP]], ...]
'''
Here, "[[[if 'condition1'][Jump to STEP n]], [[if 'condition2'][Jump to STEP m]], ...]" is the branching logic: while performing this step,
if 'condition1' is satisfied you will perform STEP n next, and if 'condition2' is satisfied you will perform STEP m next.
Remember:
Executing LLM is facing a real human, who does not know what an SOP is.
So, do not show him/her the SOP steps you are following, or the intermediate results of performing the SOP; that would only confuse him/her. Just respond with the answer.
"""

EXECUTING_LLM_SUFFIX = """
Remember:
Executing LLM is facing a real human, who does not know what an SOP is.
So, do not show him/her the SOP steps you are following, or the intermediate results of performing the SOP; that would only confuse him/her. Just respond with the answer.
"""

class executingLLM:
    def __init__(self, temperature) -> None:
        self.prefix = EXECUTING_LLM_PREFIX
        self.suffix = EXECUTING_LLM_SUFFIX
        self.LLM = OpenAIWrapper(temperature)
        self.messages = [{"role": "system", "content": "You are a helpful assistant."},
                         {"role": "system", "content": self.prefix}]

    def execute(self, current_prompt, history):
        '''Provide the LLM with the dialogue history and the current prompt to get a response.'''
        messages = self.messages + history
        messages.append({'role': 'user', "content": current_prompt + self.suffix})
        response, status = self.LLM.run(messages)
        if status:
            return response
        else:
            return "OpenAI API error."