Commit

test azure openai endpoint

binary-husky committed Nov 1, 2023
1 parent 03999b8 commit 48e0e9f
Showing 9 changed files with 269 additions and 220 deletions.
136 changes: 1 addition & 135 deletions autogen/gradio_gui/README.md
@@ -13,9 +13,7 @@ python -m pip install pyautogen
Next, run the following command to launch the GUI:

```bash
-export API_KEY=<your-api-key>
-python -m autogen.launch_gui
+export OAI_CONFIG_LIST='/path/to/OAI_CONFIG_LIST' && python -m autogen.launch_gui
```
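The `OAI_CONFIG_LIST` file referenced above is a JSON list of model configurations. As a sketch of what it can contain (field values are placeholders; the Azure entry mirrors the commented-out example in `autogen/gradio_gui/__init__.py` below), you could generate one like this:

```python
import json, os, tempfile

# Hypothetical two-entry config: one standard OpenAI model, one Azure deployment.
config = [
    {"model": "gpt-3.5-turbo-16k", "api_key": "sk-<openai-key>"},
    {
        "model": "gpt-3.5-turbo-16k",
        "api_key": "<azure-key>",
        "api_base": "https://<resource>.openai.azure.com/openai/deployments/<deployment>/chat/completions?api-version=2023-05-15",
        "api_type": "azure",
        "api_version": "2023-07-01-preview",
    },
]
path = os.path.join(tempfile.gettempdir(), "OAI_CONFIG_LIST")
with open(path, "w") as f:
    json.dump(config, f, indent=2)
os.environ["OAI_CONFIG_LIST"] = path  # read by init_config_list() in this commit
```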

> Note:
@@ -30,138 +28,6 @@ python -m autogen.launch_gui
- Then run it

```python
# <------------------- import ------------------->
from autogen.gradio_gui.gradio_service import main, install_dependencies
from autogen.gradio_gui.plugin import autogen_terminal
from autogen.gradio_gui.utils.general import AutoGenGeneral, AutoGenGroupChat
from void_terminal.toolbox import CatchException

# <------------------- define autogen agents (assistant + user_proxy) ------------------->
class AutoGenAskHuman(AutoGenGeneral):
    def define_agents(self):
        from autogen import AssistantAgent, UserProxyAgent
        return [
            {
                "name": "assistant",           # name of the agent.
                "cls": AssistantAgent,         # class of the agent.
            },
            {
                "name": "user_proxy",          # name of the agent.
                "cls": UserProxyAgent,         # class of the agent.
                "human_input_mode": "ALWAYS",  # always ask for human input.
                "llm_config": False,           # disables llm-based auto reply.
            },
        ]


# <------------------- define autogen agents (group chat) ------------------->
class AutoGenGroupChat(AutoGenGroupChat):
    def define_agents(self):
        from autogen import AssistantAgent, UserProxyAgent
        return [
            {
                "name": "Engineer",      # name of the agent.
                "cls": AssistantAgent,   # class of the agent.
                "system_message": '''Engineer. You follow an approved plan. You write python/shell code to solve tasks. Wrap the code in a code block that specifies the script type. The user can't modify your code. So do not suggest incomplete code which requires others to modify. Don't use a code block if it's not intended to be executed by the executor. Don't include multiple code blocks in one response. Do not ask others to copy and paste the result. Check the execution result returned by the executor. If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.''',
            },
            {
                "name": "Scientist",     # name of the agent.
                "cls": AssistantAgent,   # class of the agent.
                "system_message": '''Scientist. You follow an approved plan. You are able to categorize papers after seeing their abstracts printed. You don't write code.''',
            },
            {
                "name": "Planner",       # name of the agent.
                "cls": AssistantAgent,   # class of the agent.
                "system_message": '''Planner. Suggest a plan. Revise the plan based on feedback from admin and critic, until admin approval. The plan may involve an engineer who can write code and a scientist who doesn't write code. Explain the plan first. Be clear which step is performed by an engineer, and which step is performed by a scientist.''',
            },
            {
                "name": "Executor",      # name of the agent.
                "cls": UserProxyAgent,   # class of the agent.
                "human_input_mode": "NEVER",
                "system_message": '''Executor. Execute the code written by the engineer and report the result.''',
            },
            {
                "name": "Critic",        # name of the agent.
                "cls": AssistantAgent,   # class of the agent.
                "system_message": '''Critic. Double check plan, claims, code from other agents and provide feedback. Check whether the plan includes adding verifiable info such as source URL.''',
            },
            {
                "name": "user_proxy",    # name of the agent.
                "cls": UserProxyAgent,   # class of the agent.
                "human_input_mode": "NEVER",   # never ask for human input.
                "llm_config": False,           # disables llm-based auto reply.
                "code_execution_config": False,
                "system_message": "A human admin. Interact with the planner to discuss the plan. Plan execution needs to be approved by this admin.",
            },
        ]


# <------------------- define autogen buttons ------------------->
@CatchException
def autogen_terminal_fn_01(*args, **kwargs):
    return autogen_terminal(*args, AutoGenFn=AutoGenAskHuman, Callback="launch_gui->autogen_terminal_fn_01", **kwargs)

@CatchException
def autogen_terminal_fn_02(*args, **kwargs):
    return autogen_terminal(*args, AutoGenFn=AutoGenGroupChat, Callback="launch_gui->autogen_terminal_fn_02", **kwargs)


if __name__ == "__main__":
    # <------------------- change configurations ------------------->
    import void_terminal

    # void_terminal.set_conf(key="USE_PROXY", value=True)
    # void_terminal.set_conf(key="proxies", value='{"http": "http://localhost:10881", "https": "http://localhost:10881"}')
    void_terminal.set_conf(key="API_KEY", value="sk-yourapikey")
    void_terminal.set_conf(key="LLM_MODEL", value="gpt-3.5-turbo-16k")
    void_terminal.set_conf(key="AUTOGEN_USE_DOCKER", value=False)
    void_terminal.set_conf(key="PATH_LOGGING", value="gpt_log")
    void_terminal.set_conf(key="DARK_MODE", value=True)
    void_terminal.set_conf(key="AUTO_CLEAR_TXT", value=True)

    # <------------------- add fn buttons to GUI & launch gradio ------------------->
    from void_terminal.crazy_functions.ConversationHistoryArchive import ConversationHistoryArchive
    from void_terminal.crazy_functions.Accessibility import ClearCache
    main(
        {
            # <------------------- autogen functions we defined above ------------------->
            "AutoGen assistant": {
                "Group": "Agent",
                "Color": "stop",
                "AsButton": True,
                "AdvancedArgs": False,
                "Function": autogen_terminal_fn_01,
            },
            "AutoGen sci group chat": {
                "Group": "Agent",
                "Color": "stop",
                "AsButton": True,
                "AdvancedArgs": False,
                "Function": autogen_terminal_fn_02,
            },

            # <------------------- other functions from void terminal ------------------->
            "Save the current conversation": {
                "Group": "Conversation",
                "Color": "stop",
                "AsButton": True,
                "Info": "Save current conversation | No input parameters required",
                "AdvancedArgs": False,
                "Function": ConversationHistoryArchive,
            },
            "Clear all cache files": {
                "Group": "Conversation",
                "Color": "stop",
                "AsButton": True,
                "Info": "Clear all cache files, handle with caution | No input parameters required",
                "AdvancedArgs": False,
                "Function": ClearCache,
            },
        }
    )
```

88 changes: 88 additions & 0 deletions autogen/gradio_gui/__init__.py
@@ -0,0 +1,88 @@
def install_dependencies():
    # <------------------- install dependencies ------------------->
    def try_install_deps(deps, reload_m=[]):
        """
        install dependencies if not installed.
        """
        input(f'You are about to install dependencies {str(deps)}, press Enter to continue ...')
        import subprocess, sys, importlib
        for dep in deps:
            subprocess.check_call([sys.executable, '-m', 'pip', 'install', '--user', dep])
        import site
        importlib.reload(site)
        for m in reload_m:
            importlib.reload(__import__(m))

    # <------------------- dependencies ------------------->
    try:
        import gradio as gr
        import void_terminal
    except ImportError:
        try_install_deps(deps=["void-terminal>=0.0.9"])
        try_install_deps(deps=["https://github.com/binary-husky/gpt_academic/raw/master/docs/gradio-3.32.6-py3-none-any.whl"])
        import gradio as gr  # re-import after installation so the version check below works
        import void_terminal

    if gr.__version__ not in ['3.32.6']:
        # this is a special version of gradio, which is not available on pypi.org
        try_install_deps(deps=["https://github.com/binary-husky/gpt_academic/raw/master/docs/gradio-3.32.6-py3-none-any.whl"])

def init_config_list():
    import os
    from autogen import config_list_from_json
    config_file_path = os.environ.get("OAI_CONFIG_LIST")
    if config_file_path is None:
        raise EnvironmentError("""
OAI_CONFIG_LIST path is not set.
Please run
`export OAI_CONFIG_LIST='/path/to/OAI_CONFIG_LIST'`
to set the path to the config list file, and then run
`python -m autogen.launch_gui`
to start the GUI.
""")
    config_list = config_list_from_json(env_or_file=config_file_path)
    llm_config = {"config_list": config_list}
    return llm_config

# def init_config_list():
#     return {
#         "config_list": [
#             {
#                 "model": "gpt-3.5-turbo-16k",
#                 "api_key": "",
#                 "api_base": "https://gptac.openai.azure.com/openai/deployments/35-16k/chat/completions?api-version=2023-05-15",
#                 "api_type": "azure",
#                 "api_version": "2023-07-01-preview"
#             }
#         ]
#     }

def init_config():
    import void_terminal

    llm_config = init_config_list()
    # set network proxy
    # void_terminal.set_conf(key="USE_PROXY", value=True)
    # void_terminal.set_conf(key="proxies", value='{"http": "http://localhost:10881", "https": "http://localhost:10881"}')
    void_terminal.set_conf(key="AUTOGEN_USE_DOCKER", value=False)
    void_terminal.set_conf(key="PATH_LOGGING", value="gpt_log")
    void_terminal.set_conf(key="DARK_MODE", value=True)
    void_terminal.set_conf(key="AUTO_CLEAR_TXT", value=True)

    # this only influences direct chat, not autogen
    void_terminal.set_conf(key="API_KEY", value=llm_config["config_list"][0]["api_key"])
    void_terminal.set_conf(key="LLM_MODEL", value=llm_config["config_list"][0]["model"])
    # void_terminal.set_conf(key="API_KEY", value="sk-yourapikey")
    # void_terminal.set_conf(key="LLM_MODEL", value="gpt-3.5-turbo-16k")
    if llm_config["config_list"][0].get('api_type', '') == 'azure':
        model = 'azure-' + llm_config["config_list"][0]["model"]
        AZURE_CFG_ARRAY = {
            model: {
                "AZURE_ENDPOINT": llm_config["config_list"][0]["api_base"].split('openai/deployments/')[0],
                "AZURE_API_KEY": llm_config["config_list"][0]["api_key"],
                "AZURE_ENGINE": llm_config["config_list"][0]["api_base"].split('openai/deployments/')[1].split('/chat/completions')[0],
                "AZURE_MODEL_MAX_TOKEN": 8192,
            },
        }
        void_terminal.set_conf(key="LLM_MODEL", value=model)
        void_terminal.set_conf(key="AZURE_CFG_ARRAY", value=str(AZURE_CFG_ARRAY))
    return llm_config
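As a sanity check of the `api_base` parsing in the Azure branch above, a minimal standalone sketch (the resource and deployment names come from the commented-out example and are illustrative only):

```python
# Hypothetical Azure api_base in the shape init_config() expects.
api_base = "https://gptac.openai.azure.com/openai/deployments/35-16k/chat/completions?api-version=2023-05-15"

# Everything before 'openai/deployments/' is the Azure endpoint ...
endpoint = api_base.split('openai/deployments/')[0]
assert endpoint == "https://gptac.openai.azure.com/"

# ... and the path segment right after it is the deployment ("engine") name.
engine = api_base.split('openai/deployments/')[1].split('/chat/completions')[0]
assert engine == "35-16k"
```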
6 changes: 3 additions & 3 deletions autogen/gradio_gui/gradio_service.py
@@ -10,7 +10,7 @@ def main(plugins):
        raise ModuleNotFoundError(
            "Use the built-in Gradio for the best experience! " +
            "Please run `pip install https://github.com/binary-husky/gpt_academic/raw/master/docs/gradio-3.32.6-py3-none-any.whl` to install the built-in Gradio and other dependencies; see requirements.txt for details.")
-   from void_terminal.request_llm.bridge_all import predict
+   from void_terminal.request_llms.bridge_all import predict
    from void_terminal.toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
    proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
    CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
@@ -49,7 +49,7 @@ def main(plugins):
    # from void_terminal.crazy_functional import get_crazy_functions
    # plugins = get_crazy_functions()
    # for k, v in plugins.items(): plugins[k]['Group'] = "Agent"
-   # DEFAULT_FN_GROUPS, = get_conf('DEFAULT_FN_GROUPS')
+   # DEFAULT_FN_GROUPS = get_conf('DEFAULT_FN_GROUPS')
    DEFAULT_FN_GROUPS = ["Agent", "Conversation"]
    all_plugin_groups = list(set([g for _, plugin in plugins.items() for g in plugin['Group'].split('|')]))
    match_group = lambda tags, groups: any([g in groups for g in tags.split('|')])
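`match_group` decides whether a plugin is displayed by testing its `|`-separated `Group` tags against the enabled groups. The same one-liner, exercised on its own:

```python
match_group = lambda tags, groups: any([g in groups for g in tags.split('|')])

assert match_group("Agent|Conversation", ["Agent"]) is True   # shared tag -> shown
assert match_group("Conversation", ["Agent"]) is False        # no overlap -> hidden
```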
@@ -393,7 +393,7 @@ def init_cookie(cookies, chatbot):
        "- plot $y=x^2$ with $x \in (-2,1)$, save the image to res.jpg\n\n" +
        "- find the solution of $\sin(x)=\cos(x)$ by plotting the curve within $x > 0$, save the image to res.png\n\n" +
        "- plot $z=\cos(x^2+y^2)$, save the image to wave.jpg\n\n" +
-       "(2) click the small red button `AutoGen_Fn_01`."])
+       "(2) click the small red button `AutoGen ...`."])
        return cookies, chatbot
    demo.load(init_cookie, inputs=[cookies, chatbot], outputs=[cookies, chatbot])
    darkmode_js = """(dark) => {
4 changes: 2 additions & 2 deletions autogen/gradio_gui/plugin.py
@@ -29,7 +29,7 @@ def autogen_terminal(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr
    web_port        Port number on which the software is running
    """
    # Check if the current model meets the requirements
-   supported_llms = ['gpt-3.5-turbo-16k', 'gpt-4', 'gpt-4-32k']
+   supported_llms = ['gpt-3.5-turbo-16k', 'gpt-4', 'gpt-4-32k', 'azure-gpt-3.5-turbo-16k', 'azure-gpt-4', 'azure-gpt-4-32k']
    llm_kwargs['api_key'] = select_api_key(
        llm_kwargs['api_key'], llm_kwargs['llm_model'])
    if llm_kwargs['llm_model'] not in supported_llms:
@@ -39,7 +39,7 @@ def autogen_terminal(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr
        return

    # Check whether an API URL redirect is configured
-   API_URL_REDIRECT, = get_conf('API_URL_REDIRECT')
+   API_URL_REDIRECT = get_conf('API_URL_REDIRECT')
    if len(API_URL_REDIRECT) > 0:
        chatbot.append([f"Task: {txt}", "Transfers are not supported."])
        yield from update_ui(chatbot=chatbot, history=history)
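Several hunks in this commit drop the trailing comma from `X, = get_conf(...)`, which suggests `get_conf` now returns a bare value for a single key rather than a 1-tuple. A simplified stand-in illustrating that assumed convention (not the real `void_terminal.toolbox.get_conf`):

```python
_CONF = {"API_URL_REDIRECT": {}, "AUTOGEN_USE_DOCKER": False}

def get_conf(*keys):
    # Assumed behavior: one key -> bare value; several keys -> list to unpack.
    values = [_CONF[k] for k in keys]
    return values[0] if len(values) == 1 else values

API_URL_REDIRECT = get_conf("API_URL_REDIRECT")  # new single-key style, no comma
redirect, use_docker = get_conf("API_URL_REDIRECT", "AUTOGEN_USE_DOCKER")  # multi-key still unpacks
```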
22 changes: 4 additions & 18 deletions autogen/gradio_gui/utils/general.py
@@ -36,20 +36,13 @@ def do_audogen(self, input):
        input = input.content
        with ProxyNetworkActivate("AutoGen"):
            from autogen import AssistantAgent, UserProxyAgent
-           config_list = [{
-               'model': self.llm_kwargs['llm_model'],
-               'api_key': self.llm_kwargs['api_key'],
-           },]
            code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker}
            agents = self.define_agents()
            user_proxy = None
            assistant = None
            for agent_kwargs in agents:
                agent_cls = agent_kwargs.pop('cls')
                kwargs = {
-                   'llm_config': {
-                       "config_list": config_list,
-                   },
                    'code_execution_config': code_execution_config
                }
                kwargs.update(agent_kwargs)
@@ -84,19 +77,12 @@ def do_audogen(self, input):
        from autogen.gradio_gui.utils.pipe import PluginMultiprocessManager, PipeCom
        input = input.content
        with ProxyNetworkActivate("AutoGen"):
-           config_list = [{
-               'model': self.llm_kwargs['llm_model'],
-               'api_key': self.llm_kwargs['api_key'],
-           },]
            code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker}
            agents = self.define_agents()
            agents = []
            for agent_kwargs in agents:
                agent_cls = agent_kwargs.pop('cls')
                kwargs = {
-                   'llm_config': {
-                       "config_list": config_list,
-                   },
                    'code_execution_config': code_execution_config
                }
                kwargs.update(agent_kwargs)
@@ -108,12 +94,12 @@ def do_audogen(self, input):
                    user_proxy = agent_handle
            try:
                groupchat = autogen.GroupChat(agents=agents, messages=[], max_round=50)
-               manager = autogen.GroupChatManager(groupchat=groupchat, llm_config={
-                   "temperature": 0,
-                   "config_list": config_list,
-               })
+               manager = autogen.GroupChatManager(groupchat=groupchat, **self.define_group_chat_manager_config())
                if user_proxy is None: raise Exception("user_proxy is not defined")
                user_proxy.initiate_chat(manager, message=input)
            except Exception as e:
                tb_str = '```\n' + trimmed_format_exc() + '```'
                self.child_conn.send(PipeCom("done", "AutoGen exe failed: \n\n" + tb_str))

+   def define_group_chat_manager_config(self):
+       raise NotImplementedError
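The new `define_group_chat_manager_config` hook leaves the manager's configuration to subclasses. A hypothetical override (class name is illustrative; it simply restores the kwargs the removed inline dict used to pass to `GroupChatManager`):

```python
from autogen.gradio_gui import init_config_list
from autogen.gradio_gui.utils.general import AutoGenGroupChat

class SciGroupChat(AutoGenGroupChat):
    def define_group_chat_manager_config(self):
        # Returned dict is splatted into autogen.GroupChatManager(...).
        return {
            "llm_config": {
                "temperature": 0,
                "config_list": init_config_list()["config_list"],
            },
        }
```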
3 changes: 1 addition & 2 deletions autogen/gradio_gui/utils/pipe.py
@@ -13,14 +13,13 @@ def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, w
        # ⭐ run in main process
        self.autogen_work_dir = os.path.join(get_log_folder('autogen'), gen_time_str())
        self.previous_work_dir_files = {}
-       self.llm_kwargs = llm_kwargs
        self.plugin_kwargs = plugin_kwargs
        self.chatbot = chatbot
        self.history = history
        self.system_prompt = system_prompt
        self.web_port = web_port
        self.alive = True
-       self.use_docker, = get_conf('AUTOGEN_USE_DOCKER')
+       self.use_docker = get_conf('AUTOGEN_USE_DOCKER')

        # create a thread to monitor self.heartbeat, terminate the instance if no heartbeat for a long time
        timeout_seconds = 5 * 60
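The comment above describes a watchdog: a thread that kills the subprocess once heartbeats stop. The real monitor lives inside `PluginMultiprocessManager`; a minimal standalone sketch of the idea (the `last_heartbeat` and `terminate` attributes are assumptions, not the actual API):

```python
import threading, time

def start_watchdog(manager, timeout_seconds=5 * 60):
    """Stop `manager` if manager.last_heartbeat goes stale (assumed attribute)."""
    def watch():
        while manager.alive:
            if time.time() - manager.last_heartbeat > timeout_seconds:
                manager.terminate()  # assumed cleanup hook
                break
            time.sleep(5)
    thread = threading.Thread(target=watch, daemon=True)
    thread.start()
    return thread
```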