Merge branch 'main' into main
binary-husky authored Nov 1, 2023
2 parents 18362ba + 957ec00 commit 257df15
Showing 15 changed files with 370 additions and 45 deletions.
5 changes: 3 additions & 2 deletions .github/workflows/openai.yml
@@ -57,9 +57,9 @@ jobs:
run: |
pip install -e .[teachable]
- name: Install packages for RetrieveChat with QDrant when needed
-        if: matrix.python-version == '3.9'
+        if: matrix.python-version == '3.11'
run: |
-          pip install qdrant_client[fastembed]
+          pip install -e .[retrievechat] qdrant_client[fastembed]
- name: Coverage
if: matrix.python-version == '3.9'
env:
@@ -80,6 +80,7 @@ jobs:
OAI_CONFIG_LIST: ${{ secrets.OAI_CONFIG_LIST }}
run: |
pip install nbconvert nbformat ipykernel
+          coverage run -a -m pytest test/agentchat/test_qdrant_retrievechat.py
coverage run -a -m pytest test/test_with_openai.py
coverage run -a -m pytest test/test_notebook.py
coverage xml
2 changes: 1 addition & 1 deletion README.md
@@ -199,7 +199,7 @@ This project has adopted the [Microsoft Open Source Code of Conduct](https://ope
For more information, see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

-## Contributers Wall
+## Contributors Wall
<a href="https://github.com/microsoft/autogen/graphs/contributors">
<img src="https://contrib.rocks/image?repo=microsoft/autogen" />
</a>
2 changes: 1 addition & 1 deletion autogen/agentchat/contrib/math_user_proxy_agent.py
@@ -177,7 +177,7 @@ def __init__(
self.last_reply = None

def generate_init_message(self, problem, prompt_type="default", customized_prompt=None):
"""Generate a prompt for the assitant agent with the given problem and prompt.
"""Generate a prompt for the assistant agent with the given problem and prompt.
Args:
problem (str): the problem to be solved.
158 changes: 158 additions & 0 deletions autogen/agentchat/conversable_agent.py
@@ -126,6 +126,7 @@ def __init__(
self.register_reply([Agent, None], ConversableAgent.generate_oai_reply)
self.register_reply([Agent, None], ConversableAgent.generate_code_execution_reply)
self.register_reply([Agent, None], ConversableAgent.generate_function_call_reply)
+        self.register_reply([Agent, None], ConversableAgent.generate_async_function_call_reply)
self.register_reply([Agent, None], ConversableAgent.check_termination_and_human_reply)

def register_reply(
@@ -661,6 +662,28 @@ def generate_function_call_reply(
return True, func_return
return False, None

async def generate_async_function_call_reply(
self,
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
config: Optional[Any] = None,
):
"""Generate a reply using async function call."""
if config is None:
config = self
if messages is None:
messages = self._oai_messages[sender]
message = messages[-1]
if "function_call" in message:
func_call = message["function_call"]
func_name = func_call.get("name", "")
func = self._function_map.get(func_name, None)
if func and asyncio.coroutines.iscoroutinefunction(func):
_, func_return = await self.a_execute_function(func_call)
return True, func_return

return False, None

def check_termination_and_human_reply(
self,
messages: Optional[List[Dict]] = None,
@@ -732,6 +755,77 @@ def check_termination_and_human_reply(

return False, None

async def a_check_termination_and_human_reply(
self,
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
config: Optional[Any] = None,
) -> Tuple[bool, Union[str, Dict, None]]:
"""(async) Check if the conversation should be terminated, and if human reply is provided."""
if config is None:
config = self
if messages is None:
messages = self._oai_messages[sender]
message = messages[-1]
reply = ""
no_human_input_msg = ""
if self.human_input_mode == "ALWAYS":
reply = await self.a_get_human_input(
f"Provide feedback to {sender.name}. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: "
)
no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
# if the human input is empty, and the message is a termination message, then we will terminate the conversation
reply = reply if reply or not self._is_termination_msg(message) else "exit"
else:
if self._consecutive_auto_reply_counter[sender] >= self._max_consecutive_auto_reply_dict[sender]:
if self.human_input_mode == "NEVER":
reply = "exit"
else:
# self.human_input_mode == "TERMINATE":
terminate = self._is_termination_msg(message)
reply = await self.a_get_human_input(
f"Please give feedback to {sender.name}. Press enter or type 'exit' to stop the conversation: "
if terminate
else f"Please give feedback to {sender.name}. Press enter to skip and use auto-reply, or type 'exit' to stop the conversation: "
)
no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
# if the human input is empty, and the message is a termination message, then we will terminate the conversation
reply = reply if reply or not terminate else "exit"
elif self._is_termination_msg(message):
if self.human_input_mode == "NEVER":
reply = "exit"
else:
# self.human_input_mode == "TERMINATE":
reply = await self.a_get_human_input(
f"Please give feedback to {sender.name}. Press enter or type 'exit' to stop the conversation: "
)
no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
# if the human input is empty, and the message is a termination message, then we will terminate the conversation
reply = reply or "exit"

# print the no_human_input_msg
if no_human_input_msg:
print(colored(f"\n>>>>>>>> {no_human_input_msg}", "red"), flush=True)

# stop the conversation
if reply == "exit":
# reset the consecutive_auto_reply_counter
self._consecutive_auto_reply_counter[sender] = 0
return True, None

# send the human reply
if reply or self._max_consecutive_auto_reply_dict[sender] == 0:
# reset the consecutive_auto_reply_counter
self._consecutive_auto_reply_counter[sender] = 0
return True, reply

# increment the consecutive_auto_reply_counter
self._consecutive_auto_reply_counter[sender] += 1
if self.human_input_mode != "NEVER":
print(colored("\n>>>>>>>> USING AUTO REPLY...", "red"), flush=True)

return False, None

def generate_reply(
self,
messages: Optional[List[Dict]] = None,
@@ -868,6 +962,20 @@ def get_human_input(self, prompt: str) -> str:
reply = input(prompt)
return reply

async def a_get_human_input(self, prompt: str) -> str:
"""(Async) Get human input.
Override this method to customize the way to get human input.
Args:
prompt (str): prompt for the human input.
Returns:
str: human input.
"""
reply = input(prompt)
return reply

def run_code(self, code, **kwargs):
"""Run the code and return the result.
@@ -1002,6 +1110,56 @@ def execute_function(self, func_call):
"content": str(content),
}

async def a_execute_function(self, func_call):
"""Execute an async function call and return the result.
Override this function to modify the way async functions are executed.
Args:
func_call: a dictionary extracted from openai message at key "function_call" with keys "name" and "arguments".
Returns:
A tuple of (is_exec_success, result_dict).
is_exec_success (boolean): whether the execution is successful.
result_dict: a dictionary with keys "name", "role", and "content". Value of "role" is "function".
"""
func_name = func_call.get("name", "")
func = self._function_map.get(func_name, None)

is_exec_success = False
if func is not None:
# Extract arguments from a json-like string and put it into a dict.
input_string = self._format_json_str(func_call.get("arguments", "{}"))
try:
arguments = json.loads(input_string)
except json.JSONDecodeError as e:
arguments = None
content = f"Error: {e}\n You argument should follow json format."

# Try to execute the function
if arguments is not None:
print(
colored(f"\n>>>>>>>> EXECUTING ASYNC FUNCTION {func_name}...", "magenta"),
flush=True,
)
try:
if asyncio.coroutines.iscoroutinefunction(func):
content = await func(**arguments)
else:
# Fallback to sync function if the function is not async
content = func(**arguments)
is_exec_success = True
except Exception as e:
content = f"Error: {e}"
else:
content = f"Error: Function {func_name} not found."

return is_exec_success, {
"name": func_name,
"role": "function",
"content": str(content),
}

def generate_init_message(self, **context) -> Union[str, Dict]:
"""Generate the initial message for the agent.
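Taken together, these additions let a coroutine registered in an agent's function map be awaited through the reply pipeline (`generate_async_function_call_reply` dispatches to `a_execute_function`). A minimal sketch of the new path; the `lookup` coroutine and agent settings below are illustrative assumptions, not part of this commit:

import asyncio

import autogen


async def lookup(query: str) -> str:
    # Illustrative async tool; stands in for any coroutine, e.g. an async HTTP call.
    await asyncio.sleep(0.1)
    return f"results for {query}"


async def main():
    user_proxy = autogen.UserProxyAgent(
        name="user", human_input_mode="NEVER", code_execution_config=False
    )
    # A coroutine in the function map becomes eligible for the async call path.
    user_proxy.register_function(function_map={"lookup": lookup})

    # a_execute_function parses the JSON arguments, awaits coroutine
    # functions, and falls back to a plain call for sync ones.
    success, result = await user_proxy.a_execute_function(
        {"name": "lookup", "arguments": '{"query": "autogen"}'}
    )
    print(success, result["content"])


asyncio.run(main())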
50 changes: 50 additions & 0 deletions autogen/agentchat/groupchat.py
@@ -130,7 +130,12 @@ def __init__(
system_message=system_message,
**kwargs,
)
+        # Order of register_reply is important.
+        # Allow sync chat if initiated using initiate_chat
        self.register_reply(Agent, GroupChatManager.run_chat, config=groupchat, reset_config=GroupChat.reset)
+        # Allow async chat if initiated using a_initiate_chat
+        self.register_reply(Agent, GroupChatManager.a_run_chat, config=groupchat, reset_config=GroupChat.reset)

# self._random = random.Random(seed)

def run_chat(
@@ -177,3 +182,48 @@ def run_chat(
speaker.send(reply, self, request_reply=False)
message = self.last_message(speaker)
return True, None

async def a_run_chat(
self,
messages: Optional[List[Dict]] = None,
sender: Optional[Agent] = None,
config: Optional[GroupChat] = None,
):
"""Run a group chat asynchronously."""
if messages is None:
messages = self._oai_messages[sender]
message = messages[-1]
speaker = sender
groupchat = config
for i in range(groupchat.max_round):
# set the name to speaker's name if the role is not function
if message["role"] != "function":
message["name"] = speaker.name
groupchat.messages.append(message)
# broadcast the message to all agents except the speaker
for agent in groupchat.agents:
if agent != speaker:
await self.a_send(message, agent, request_reply=False, silent=True)
if i == groupchat.max_round - 1:
# the last round
break
try:
# select the next speaker
speaker = groupchat.select_speaker(speaker, self)
# let the speaker speak
reply = await speaker.a_generate_reply(sender=self)
except KeyboardInterrupt:
# let the admin agent speak if interrupted
if groupchat.admin_name in groupchat.agent_names:
# admin agent is one of the participants
speaker = groupchat.agent_by_name(groupchat.admin_name)
reply = await speaker.a_generate_reply(sender=self)
else:
# admin agent is not found in the participants
raise
if reply is None:
break
# The speaker sends the message without requesting a reply
await speaker.a_send(reply, self, request_reply=False)
message = self.last_message(speaker)
return True, None
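With both reply functions registered, a group chat can be driven end to end without blocking. A hedged sketch of kicking one off via `a_initiate_chat` (the agent setup and llm_config are assumptions for illustration; only `a_run_chat` itself comes from this diff):

import asyncio

import autogen

# Assumed configuration, mirroring the pattern in this repo's tests.
config_list = autogen.config_list_from_json("OAI_CONFIG_LIST")
llm_config = {"config_list": config_list, "seed": 41}

assistant = autogen.AssistantAgent(name="assistant", llm_config=llm_config)
critic = autogen.AssistantAgent(name="critic", llm_config=llm_config)
user = autogen.UserProxyAgent(
    name="user", human_input_mode="NEVER", code_execution_config=False
)

groupchat = autogen.GroupChat(agents=[user, assistant, critic], messages=[], max_round=6)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)

# a_initiate_chat triggers the newly registered a_run_chat, so each
# speaker turn (a_generate_reply / a_send) is awaited.
asyncio.run(user.a_initiate_chat(manager, message="Summarize the plan."))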
3 changes: 1 addition & 2 deletions autogen/retrieve_utils.py
@@ -1,9 +1,8 @@
-from typing import List, Union, Dict, Tuple, Callable
+from typing import List, Union, Callable
import os
import requests
from urllib.parse import urlparse
import glob
import tiktoken
import chromadb

if chromadb.__version__ < "0.4.15":
2 changes: 1 addition & 1 deletion autogen/version.py
@@ -1 +1 @@
__version__ = "0.1.13"
__version__ = "0.1.14"
2 changes: 1 addition & 1 deletion setup.py
@@ -57,7 +57,7 @@
],
"blendsearch": ["flaml[blendsearch]"],
"mathchat": ["sympy", "pydantic==1.10.9", "wolframalpha"],
"retrievechat": ["chromadb", "tiktoken", "sentence_transformers", "pypdf"],
"retrievechat": ["chromadb", "tiktoken", "sentence_transformers", "pypdf", "ipython"],
"teachable": ["chromadb"],
"gui":[
"void-terminal>=0.0.9",
35 changes: 35 additions & 0 deletions test/agentchat/test_async_get_human_input.py
@@ -0,0 +1,35 @@
import asyncio
import autogen
import pytest
from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST


@pytest.mark.asyncio
async def test_async_get_human_input():
try:
import openai
except ImportError:
return
config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)

# create an AssistantAgent instance named "assistant"
assistant = autogen.AssistantAgent(
name="assistant",
max_consecutive_auto_reply=2,
llm_config={"request_timeout": 600, "seed": 41, "config_list": config_list, "temperature": 0},
)

user_proxy = autogen.UserProxyAgent(name="user", human_input_mode="ALWAYS", code_execution_config=False)

async def custom_a_get_human_input(prompt):
return "This is a test"

user_proxy.a_get_human_input = custom_a_get_human_input

user_proxy.register_reply([autogen.Agent, None], autogen.ConversableAgent.a_check_termination_and_human_reply)

await user_proxy.a_initiate_chat(assistant, clear_history=True, message="Hello.")


if __name__ == "__main__":
    # The test is a coroutine; run it in an event loop instead of calling it directly.
    asyncio.run(test_async_get_human_input())
23 changes: 1 addition & 22 deletions test/agentchat/test_retrievechat.py
@@ -10,7 +10,6 @@
from autogen.agentchat.contrib.retrieve_user_proxy_agent import (
RetrieveUserProxyAgent,
)
-from autogen.retrieve_utils import create_vector_db_from_dir, query_vector_db
import chromadb
from chromadb.utils import embedding_functions as ef

@@ -61,6 +60,7 @@ def test_retrievechat():
"model": config_list[0]["model"],
"client": chromadb.PersistentClient(path="/tmp/chromadb"),
"embedding_function": sentence_transformer_ef,
"get_or_create": True,
},
)

@@ -72,26 +72,5 @@ def test_retrievechat():
print(conversations)


-@pytest.mark.skipif(
-    sys.platform in ["darwin", "win32"] or skip_test,
-    reason="do not run on MacOS or windows",
-)
-def test_retrieve_utils():
-    client = chromadb.PersistentClient(path="/tmp/chromadb")
-    create_vector_db_from_dir(dir_path="./website/docs", client=client, collection_name="autogen-docs")
-    results = query_vector_db(
-        query_texts=[
-            "How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?",
-        ],
-        n_results=4,
-        client=client,
-        collection_name="autogen-docs",
-        search_string="AutoGen",
-    )
-    print(results["ids"][0])
-    assert len(results["ids"][0]) == 4


if __name__ == "__main__":
test_retrievechat()
-    test_retrieve_utils()