remove notification after user commits
binary-husky committed Nov 1, 2023
1 parent 48e0e9f commit 720a7dc
Showing 4 changed files with 62 additions and 16 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -167,3 +167,6 @@ wolfram.txt
 
 # DB on disk for TeachableAgent
 tmp/
+
+# logs
+gpt_log
20 changes: 5 additions & 15 deletions autogen/gradio_gui/__init__.py
@@ -42,18 +42,6 @@ def init_config_list():
     llm_config = {"config_list": config_list}
     return llm_config
 
-# def init_config_list():
-#     return {
-#         "config_list": [
-#             {
-#                 "model": "gpt-3.5-turbo-16k",
-#                 "api_key": "",
-#                 "api_base": "https://gptac.openai.azure.com/openai/deployments/35-16k/chat/completions?api-version=2023-05-15",
-#                 "api_type": "azure",
-#                 "api_version": "2023-07-01-preview"
-#             }
-#         ]
-#     }
 
 def init_config():
     import void_terminal
@@ -67,19 +55,7 @@ def init_config():
     void_terminal.set_conf(key="DARK_MODE", value=True)
     void_terminal.set_conf(key="AUTO_CLEAR_TXT", value=True)
 
-    # this only influence direct chat, not autogen
+    # the following configurations only influence direct chat, not autogen
     void_terminal.set_conf(key="API_KEY", value=llm_config["config_list"][0]["api_key"])
     void_terminal.set_conf(key="LLM_MODEL", value=llm_config["config_list"][0]["model"])
     # void_terminal.set_conf(key="API_KEY", value="sk-yourapikey")
     # void_terminal.set_conf(key="LLM_MODEL", value="gpt-3.5-turbo-16k")
     if llm_config["config_list"][0].get('api_type', '') == 'azure':
         model = 'azure-'+llm_config["config_list"][0]["model"]
         api_base = llm_config["config_list"][0]["api_base"]
         if api_base.endswith('/'): api_base = api_base[:-1]
         AZURE_CFG_ARRAY = {
             model:
             {
-                "AZURE_ENDPOINT": llm_config["config_list"][0]["api_base"].split('openai/deployments/')[0],
+                "AZURE_ENDPOINT": llm_config["config_list"][0]["api_base"] + '/',
                 "AZURE_API_KEY": llm_config["config_list"][0]["api_key"],
-                "AZURE_ENGINE": llm_config["config_list"][0]["api_base"].split('openai/deployments/')[1].split('/chat/completions')[0],
+                "AZURE_ENGINE": llm_config["config_list"][0]["deployment_id"],
                 "AZURE_MODEL_MAX_TOKEN": 8192,
             },
         }
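The visible change in this hunk swaps URL parsing for an explicit field: the Azure deployment name now comes from deployment_id instead of being split back out of a long api_base URL, and api_base is expected to be the bare endpoint. A minimal sketch of the resulting mapping, with a placeholder key and illustrative variable names (entry and azure_cfg are not part of the commit):

# Sketch of the Azure mapping after this commit: the endpoint is the bare
# api_base plus a trailing slash, and the engine is the deployment_id field.
entry = {
    "model": "gpt-3.5-turbo-16k",
    "api_key": "<your-azure-api-key>",  # placeholder, not a real key
    "api_base": "https://gptac.openai.azure.com",
    "api_type": "azure",
    "deployment_id": "35-16k",
}

azure_cfg = {
    "azure-" + entry["model"]: {
        "AZURE_ENDPOINT": entry["api_base"] + "/",  # e.g. https://gptac.openai.azure.com/
        "AZURE_API_KEY": entry["api_key"],
        "AZURE_ENGINE": entry["deployment_id"],     # the Azure deployment name
        "AZURE_MODEL_MAX_TOKEN": 8192,
    },
}
print(azure_cfg["azure-gpt-3.5-turbo-16k"]["AZURE_ENGINE"])  # -> 35-16k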
5 changes: 5 additions & 0 deletions autogen/gradio_gui/utils/pipe.py
@@ -99,6 +99,9 @@ def main_process_ui_control(self, txt, create_or_resume) -> str:
         if create_or_resume == 'create':
             self.cnt = 1
             self.parent_conn = self.launch_subprocess_with_pipe() # ⭐⭐⭐
+        else:
+            if 'Waiting for further instructions.' in self.chatbot[-1][-1]:
+                self.chatbot.pop(-1) # remove the last line
         self.send_command(txt)
 
         if txt == 'exit':
@@ -112,6 +115,8 @@ def main_process_ui_control(self, txt, create_or_resume) -> str:
         if self.parent_conn.poll():
             if '[GPT-Academic] waiting' in self.chatbot[-1][-1]:
                 self.chatbot.pop(-1) # remove the last line
+            if 'Waiting for further instructions.' in self.chatbot[-1][-1]:
+                self.chatbot.pop(-1) # remove the last line
             msg = self.parent_conn.recv() # PipeCom
             if msg.cmd == "done":
                 self.chatbot.append([f"terminate", msg.content])
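These two additions implement the commit message: once the user sends a follow-up command, any trailing "Waiting for further instructions." notification is popped from the chat history rather than lingering above the new output. A standalone sketch of the same pattern, assuming chatbot is a list of [sender, message] pairs as in Gradio's Chatbot component (the helper pop_stale_notification is illustrative, not part of this commit):

def pop_stale_notification(chatbot,
                           markers=('Waiting for further instructions.',
                                    '[GPT-Academic] waiting')):
    # Only the trailing entry can be a status notification in this UI,
    # so it is enough to inspect and pop the last message.
    if chatbot and any(m in chatbot[-1][-1] for m in markers):
        chatbot.pop(-1)  # remove the stale notification line
    return chatbot

history = [['user', 'run the task'],
           ['assistant', 'Waiting for further instructions.']]
pop_stale_notification(history)
print(history)  # [['user', 'run the task']]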
50 changes: 49 additions & 1 deletion launch_gui.py
@@ -3,8 +3,56 @@
 from autogen.gradio_gui.plugin import autogen_terminal
 from autogen.gradio_gui.gradio_service import main
 from autogen.gradio_gui import install_dependencies, init_config
 
 install_dependencies()
 
+def init_config_list():
+    return {
+        "config_list": [
+            {
+                "model": "gpt-3.5-turbo-16k",
+                "api_key": "82a0be415a9f4871ada57ebed389337b",
+                "api_base": "https://gptac.openai.azure.com",
+                "api_type": "azure",
+                "api_version": "2023-07-01-preview",
+                "deployment_id": "35-16k",
+            }
+        ]
+    }
+
+def init_config():
+    import void_terminal
+
+    llm_config = init_config_list()
+    # set network proxy
+    # void_terminal.set_conf(key="USE_PROXY", value=True)
+    # void_terminal.set_conf(key="proxies", value='{"http": "http://localhost:10881", "https": "http://localhost:10881"}')
+    void_terminal.set_conf(key="AUTOGEN_USE_DOCKER", value=False)
+    void_terminal.set_conf(key="PATH_LOGGING", value="gpt_log")
+    void_terminal.set_conf(key="DARK_MODE", value=True)
+    void_terminal.set_conf(key="AUTO_CLEAR_TXT", value=True)
+
+    # the following configurations only influence direct chat, not autogen
+    void_terminal.set_conf(key="API_KEY", value=llm_config["config_list"][0]["api_key"])
+    void_terminal.set_conf(key="LLM_MODEL", value=llm_config["config_list"][0]["model"])
+    # void_terminal.set_conf(key="API_KEY", value="sk-yourapikey")
+    # void_terminal.set_conf(key="LLM_MODEL", value="gpt-3.5-turbo-16k")
+    if llm_config["config_list"][0].get('api_type', '') == 'azure':
+        model = 'azure-'+llm_config["config_list"][0]["model"]
+        api_base = llm_config["config_list"][0]["api_base"]
+        if api_base.endswith('/'): api_base = api_base[:-1]
+        AZURE_CFG_ARRAY = {
+            model:
+            {
+                "AZURE_ENDPOINT": llm_config["config_list"][0]["api_base"] + '/',
+                "AZURE_API_KEY": llm_config["config_list"][0]["api_key"],
+                "AZURE_ENGINE": llm_config["config_list"][0]["deployment_id"],
+                "AZURE_MODEL_MAX_TOKEN": 8192,
+            },
+        }
+        void_terminal.set_conf(key="LLM_MODEL", value=model)
+        void_terminal.set_conf(key="AZURE_CFG_ARRAY", value=str(AZURE_CFG_ARRAY))
+    return llm_config
+
+llm_config = init_config()
 
 class AutoGenAskHuman(AutoGenGeneral):

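For context, init_config() returns a standard AutoGen llm_config dictionary, so the value assigned to llm_config above can be passed straight to agent constructors. A hedged usage sketch (the agent names and task message are illustrative and not part of this commit):

import autogen

llm_config = init_config()  # as defined in launch_gui.py above

assistant = autogen.AssistantAgent(name="assistant", llm_config=llm_config)
user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="ALWAYS",    # ask the human, mirroring AutoGenAskHuman
    code_execution_config=False,  # no local code execution in this sketch
)
user_proxy.initiate_chat(assistant, message="Summarize the repository README.")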