From 720a7dc60d369742bf4b049d9971c7b85b9158fd Mon Sep 17 00:00:00 2001 From: binary-husky Date: Wed, 1 Nov 2023 16:13:06 +0800 Subject: [PATCH] remove notification after user commits --- .gitignore | 3 ++ autogen/gradio_gui/__init__.py | 20 ++++--------- autogen/gradio_gui/utils/pipe.py | 5 ++++ launch_gui.py | 50 +++++++++++++++++++++++++++++++- 4 files changed, 62 insertions(+), 16 deletions(-) diff --git a/.gitignore b/.gitignore index 47917823422c..e94a108e42a2 100644 --- a/.gitignore +++ b/.gitignore @@ -167,3 +167,6 @@ wolfram.txt # DB on disk for TeachableAgent tmp/ + +# logs +gpt_log \ No newline at end of file diff --git a/autogen/gradio_gui/__init__.py b/autogen/gradio_gui/__init__.py index d4fd20ec1db3..0384320b43a1 100644 --- a/autogen/gradio_gui/__init__.py +++ b/autogen/gradio_gui/__init__.py @@ -42,18 +42,6 @@ def init_config_list(): llm_config = {"config_list": config_list} return llm_config -# def init_config_list(): -# return { -# "config_list": [ -# { -# "model": "gpt-3.5-turbo-16k", -# "api_key": "", -# "api_base": "https://gptac.openai.azure.com/openai/deployments/35-16k/chat/completions?api-version=2023-05-15", -# "api_type": "azure", -# "api_version": "2023-07-01-preview" -# } -# ] -# } def init_config(): import void_terminal @@ -67,19 +55,21 @@ def init_config(): void_terminal.set_conf(key="DARK_MODE", value=True) void_terminal.set_conf(key="AUTO_CLEAR_TXT", value=True) - # this only influence direct chat, not autogen + # the following configurations only influence direct chat, not autogen void_terminal.set_conf(key="API_KEY", value=llm_config["config_list"][0]["api_key"]) void_terminal.set_conf(key="LLM_MODEL", value=llm_config["config_list"][0]["model"]) # void_terminal.set_conf(key="API_KEY",value="sk-yourapikey") # void_terminal.set_conf(key="LLM_MODEL", value="gpt-3.5-turbo-16k") if llm_config["config_list"][0].get('api_type', '') == 'azure': model = 'azure-'+llm_config["config_list"][0]["model"] + api_base = 
llm_config["config_list"][0]["api_base"] + if api_base.endswith('/'): api_base = api_base[:-1] AZURE_CFG_ARRAY = { model: { - "AZURE_ENDPOINT": llm_config["config_list"][0]["api_base"].split('openai/deployments/')[0], + "AZURE_ENDPOINT": api_base + '/', "AZURE_API_KEY": llm_config["config_list"][0]["api_key"], - "AZURE_ENGINE": llm_config["config_list"][0]["api_base"].split('openai/deployments/')[1].split('/chat/completions')[0], + "AZURE_ENGINE": llm_config["config_list"][0]["deployment_id"], "AZURE_MODEL_MAX_TOKEN": 8192, }, } diff --git a/autogen/gradio_gui/utils/pipe.py b/autogen/gradio_gui/utils/pipe.py index 7469db2311be..cd8bd01a271e 100644 --- a/autogen/gradio_gui/utils/pipe.py +++ b/autogen/gradio_gui/utils/pipe.py @@ -99,6 +99,9 @@ def main_process_ui_control(self, txt, create_or_resume) -> str: if create_or_resume == 'create': self.cnt = 1 self.parent_conn = self.launch_subprocess_with_pipe() # ⭐⭐⭐ + else: + if 'Waiting for further instructions.' in self.chatbot[-1][-1]: + self.chatbot.pop(-1) # remove the last line self.send_command(txt) if txt == 'exit': @@ -112,6 +115,8 @@ if self.parent_conn.poll(): if '[GPT-Academic] waiting' in self.chatbot[-1][-1]: self.chatbot.pop(-1) # remove the last line + if 'Waiting for further instructions.' 
in self.chatbot[-1][-1]: + self.chatbot.pop(-1) # remove the last line msg = self.parent_conn.recv() # PipeCom if msg.cmd == "done": self.chatbot.append([f"terminate", msg.content]) diff --git a/launch_gui.py b/launch_gui.py index 0cabb54cd4b6..2d723644846e 100644 --- a/launch_gui.py +++ b/launch_gui.py @@ -3,8 +3,56 @@ from autogen.gradio_gui.plugin import autogen_terminal from autogen.gradio_gui.gradio_service import main from autogen.gradio_gui import install_dependencies, init_config - install_dependencies() + +def init_config_list(): + return { + "config_list": [ + { + "model": "gpt-3.5-turbo-16k", + "api_key": "<your-azure-api-key>", + "api_base": "https://gptac.openai.azure.com", + "api_type": "azure", + "api_version": "2023-07-01-preview", + "deployment_id": "35-16k", + } + ] + } + +def init_config(): + import void_terminal + + llm_config = init_config_list() + # set network proxy + # void_terminal.set_conf(key="USE_PROXY", value=True) + # void_terminal.set_conf(key="proxies", value='{"http": "http://localhost:10881", "https": "http://localhost:10881"}') + void_terminal.set_conf(key="AUTOGEN_USE_DOCKER", value=False) + void_terminal.set_conf(key="PATH_LOGGING", value="gpt_log") + void_terminal.set_conf(key="DARK_MODE", value=True) + void_terminal.set_conf(key="AUTO_CLEAR_TXT", value=True) + + # the following configurations only influence direct chat, not autogen + void_terminal.set_conf(key="API_KEY", value=llm_config["config_list"][0]["api_key"]) + void_terminal.set_conf(key="LLM_MODEL", value=llm_config["config_list"][0]["model"]) + # void_terminal.set_conf(key="API_KEY",value="sk-yourapikey") + # void_terminal.set_conf(key="LLM_MODEL", value="gpt-3.5-turbo-16k") + if llm_config["config_list"][0].get('api_type', '') == 'azure': + model = 'azure-'+llm_config["config_list"][0]["model"] + api_base = llm_config["config_list"][0]["api_base"] + if api_base.endswith('/'): api_base = api_base[:-1] + AZURE_CFG_ARRAY = { + model: + { + "AZURE_ENDPOINT": 
api_base + '/', + "AZURE_API_KEY": llm_config["config_list"][0]["api_key"], + "AZURE_ENGINE": llm_config["config_list"][0]["deployment_id"], + "AZURE_MODEL_MAX_TOKEN": 8192, + }, + } + void_terminal.set_conf(key="LLM_MODEL", value=model) + void_terminal.set_conf(key="AZURE_CFG_ARRAY", value=str(AZURE_CFG_ARRAY)) + return llm_config + llm_config = init_config() class AutoGenAskHuman(AutoGenGeneral):