diff --git a/.github/workflows/openai.yml b/.github/workflows/openai.yml
index 339c36527ba6..5c334eec3b0d 100644
--- a/.github/workflows/openai.yml
+++ b/.github/workflows/openai.yml
@@ -57,9 +57,9 @@ jobs:
run: |
pip install -e .[teachable]
- name: Install packages for RetrieveChat with QDrant when needed
- if: matrix.python-version == '3.9'
+ if: matrix.python-version == '3.11'
run: |
- pip install qdrant_client[fastembed]
+ pip install -e .[retrievechat] qdrant_client[fastembed]
- name: Coverage
if: matrix.python-version == '3.9'
env:
@@ -80,6 +80,7 @@ jobs:
OAI_CONFIG_LIST: ${{ secrets.OAI_CONFIG_LIST }}
run: |
pip install nbconvert nbformat ipykernel
+ coverage run -a -m pytest test/agentchat/test_qdrant_retrievechat.py
coverage run -a -m pytest test/test_with_openai.py
coverage run -a -m pytest test/test_notebook.py
coverage xml
diff --git a/README.md b/README.md
index 26984fe665d0..207f3971a0b3 100644
--- a/README.md
+++ b/README.md
@@ -199,7 +199,7 @@ This project has adopted the [Microsoft Open Source Code of Conduct](https://ope
For more information, see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
-## Contributers Wall
+## Contributors Wall
diff --git a/autogen/agentchat/contrib/math_user_proxy_agent.py b/autogen/agentchat/contrib/math_user_proxy_agent.py
index 7a15e80ec744..f7557517da81 100644
--- a/autogen/agentchat/contrib/math_user_proxy_agent.py
+++ b/autogen/agentchat/contrib/math_user_proxy_agent.py
@@ -177,7 +177,7 @@ def __init__(
self.last_reply = None
def generate_init_message(self, problem, prompt_type="default", customized_prompt=None):
- """Generate a prompt for the assitant agent with the given problem and prompt.
+ """Generate a prompt for the assistant agent with the given problem and prompt.
Args:
problem (str): the problem to be solved.
diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py
index 3a0e19598813..017ba4e848ac 100644
--- a/autogen/agentchat/conversable_agent.py
+++ b/autogen/agentchat/conversable_agent.py
@@ -126,6 +126,7 @@ def __init__(
self.register_reply([Agent, None], ConversableAgent.generate_oai_reply)
self.register_reply([Agent, None], ConversableAgent.generate_code_execution_reply)
self.register_reply([Agent, None], ConversableAgent.generate_function_call_reply)
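+        # Note: coroutine reply functions are skipped by the sync generate_reply path,
+        # so the async variant registered below only takes effect via a_generate_reply.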
+ self.register_reply([Agent, None], ConversableAgent.generate_async_function_call_reply)
self.register_reply([Agent, None], ConversableAgent.check_termination_and_human_reply)
def register_reply(
@@ -661,6 +662,28 @@ def generate_function_call_reply(
return True, func_return
return False, None
+ async def generate_async_function_call_reply(
+ self,
+ messages: Optional[List[Dict]] = None,
+ sender: Optional[Agent] = None,
+ config: Optional[Any] = None,
+ ):
+ """Generate a reply using async function call."""
+ if config is None:
+ config = self
+ if messages is None:
+ messages = self._oai_messages[sender]
+ message = messages[-1]
+ if "function_call" in message:
+ func_call = message["function_call"]
+ func_name = func_call.get("name", "")
+ func = self._function_map.get(func_name, None)
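+            # Only coroutine functions are awaited here; sync functions fall through
+            # (False, None) so that generate_function_call_reply can handle them.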
+ if func and asyncio.coroutines.iscoroutinefunction(func):
+ _, func_return = await self.a_execute_function(func_call)
+ return True, func_return
+
+ return False, None
+
def check_termination_and_human_reply(
self,
messages: Optional[List[Dict]] = None,
@@ -732,6 +755,77 @@ def check_termination_and_human_reply(
return False, None
+ async def a_check_termination_and_human_reply(
+ self,
+ messages: Optional[List[Dict]] = None,
+ sender: Optional[Agent] = None,
+ config: Optional[Any] = None,
+ ) -> Tuple[bool, Union[str, Dict, None]]:
+ """(async) Check if the conversation should be terminated, and if human reply is provided."""
+ if config is None:
+ config = self
+ if messages is None:
+ messages = self._oai_messages[sender]
+ message = messages[-1]
+ reply = ""
+ no_human_input_msg = ""
+ if self.human_input_mode == "ALWAYS":
+ reply = await self.a_get_human_input(
+ f"Provide feedback to {sender.name}. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: "
+ )
+ no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
+ # if the human input is empty, and the message is a termination message, then we will terminate the conversation
+ reply = reply if reply or not self._is_termination_msg(message) else "exit"
+ else:
+ if self._consecutive_auto_reply_counter[sender] >= self._max_consecutive_auto_reply_dict[sender]:
+ if self.human_input_mode == "NEVER":
+ reply = "exit"
+ else:
+ # self.human_input_mode == "TERMINATE":
+ terminate = self._is_termination_msg(message)
+ reply = await self.a_get_human_input(
+ f"Please give feedback to {sender.name}. Press enter or type 'exit' to stop the conversation: "
+ if terminate
+ else f"Please give feedback to {sender.name}. Press enter to skip and use auto-reply, or type 'exit' to stop the conversation: "
+ )
+ no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
+ # if the human input is empty, and the message is a termination message, then we will terminate the conversation
+ reply = reply if reply or not terminate else "exit"
+ elif self._is_termination_msg(message):
+ if self.human_input_mode == "NEVER":
+ reply = "exit"
+ else:
+ # self.human_input_mode == "TERMINATE":
+ reply = await self.a_get_human_input(
+ f"Please give feedback to {sender.name}. Press enter or type 'exit' to stop the conversation: "
+ )
+ no_human_input_msg = "NO HUMAN INPUT RECEIVED." if not reply else ""
+ # if the human input is empty, and the message is a termination message, then we will terminate the conversation
+ reply = reply or "exit"
+
+ # print the no_human_input_msg
+ if no_human_input_msg:
+ print(colored(f"\n>>>>>>>> {no_human_input_msg}", "red"), flush=True)
+
+ # stop the conversation
+ if reply == "exit":
+ # reset the consecutive_auto_reply_counter
+ self._consecutive_auto_reply_counter[sender] = 0
+ return True, None
+
+ # send the human reply
+ if reply or self._max_consecutive_auto_reply_dict[sender] == 0:
+ # reset the consecutive_auto_reply_counter
+ self._consecutive_auto_reply_counter[sender] = 0
+ return True, reply
+
+ # increment the consecutive_auto_reply_counter
+ self._consecutive_auto_reply_counter[sender] += 1
+ if self.human_input_mode != "NEVER":
+ print(colored("\n>>>>>>>> USING AUTO REPLY...", "red"), flush=True)
+
+ return False, None
+
def generate_reply(
self,
messages: Optional[List[Dict]] = None,
@@ -868,6 +962,20 @@ def get_human_input(self, prompt: str) -> str:
reply = input(prompt)
return reply
+ async def a_get_human_input(self, prompt: str) -> str:
+ """(Async) Get human input.
+
+ Override this method to customize the way to get human input.
+
+ Args:
+ prompt (str): prompt for the human input.
+
+ Returns:
+ str: human input.
+ """
+ reply = input(prompt)
+ return reply
+
def run_code(self, code, **kwargs):
"""Run the code and return the result.
@@ -1002,6 +1110,56 @@ def execute_function(self, func_call):
"content": str(content),
}
+ async def a_execute_function(self, func_call):
+ """Execute an async function call and return the result.
+
+ Override this function to modify the way async functions are executed.
+
+ Args:
+            func_call: a dictionary extracted from an OpenAI message at key "function_call" with keys "name" and "arguments".
+
+ Returns:
+ A tuple of (is_exec_success, result_dict).
+ is_exec_success (boolean): whether the execution is successful.
+ result_dict: a dictionary with keys "name", "role", and "content". Value of "role" is "function".
+ """
+ func_name = func_call.get("name", "")
+ func = self._function_map.get(func_name, None)
+
+ is_exec_success = False
+ if func is not None:
+ # Extract arguments from a json-like string and put it into a dict.
+ input_string = self._format_json_str(func_call.get("arguments", "{}"))
+ try:
+ arguments = json.loads(input_string)
+ except json.JSONDecodeError as e:
+ arguments = None
+ content = f"Error: {e}\n You argument should follow json format."
+
+ # Try to execute the function
+ if arguments is not None:
+ print(
+ colored(f"\n>>>>>>>> EXECUTING ASYNC FUNCTION {func_name}...", "magenta"),
+ flush=True,
+ )
+ try:
+ if asyncio.coroutines.iscoroutinefunction(func):
+ content = await func(**arguments)
+ else:
+ # Fallback to sync function if the function is not async
+ content = func(**arguments)
+ is_exec_success = True
+ except Exception as e:
+ content = f"Error: {e}"
+ else:
+ content = f"Error: Function {func_name} not found."
+
+ return is_exec_success, {
+ "name": func_name,
+ "role": "function",
+ "content": str(content),
+ }
+
def generate_init_message(self, **context) -> Union[str, Dict]:
"""Generate the initial message for the agent.
diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py
index 9ed2ff774645..f1c549bc18b2 100644
--- a/autogen/agentchat/groupchat.py
+++ b/autogen/agentchat/groupchat.py
@@ -130,7 +130,12 @@ def __init__(
system_message=system_message,
**kwargs,
)
+ # Order of register_reply is important.
+ # Allow sync chat if initiated using initiate_chat
self.register_reply(Agent, GroupChatManager.run_chat, config=groupchat, reset_config=GroupChat.reset)
+ # Allow async chat if initiated using a_initiate_chat
+ self.register_reply(Agent, GroupChatManager.a_run_chat, config=groupchat, reset_config=GroupChat.reset)
+
# self._random = random.Random(seed)
def run_chat(
@@ -177,3 +182,48 @@ def run_chat(
speaker.send(reply, self, request_reply=False)
message = self.last_message(speaker)
return True, None
+
+ async def a_run_chat(
+ self,
+ messages: Optional[List[Dict]] = None,
+ sender: Optional[Agent] = None,
+ config: Optional[GroupChat] = None,
+ ):
+ """Run a group chat asynchronously."""
+ if messages is None:
+ messages = self._oai_messages[sender]
+ message = messages[-1]
+ speaker = sender
+ groupchat = config
+ for i in range(groupchat.max_round):
+ # set the name to speaker's name if the role is not function
+ if message["role"] != "function":
+ message["name"] = speaker.name
+ groupchat.messages.append(message)
+ # broadcast the message to all agents except the speaker
+ for agent in groupchat.agents:
+ if agent != speaker:
+ await self.a_send(message, agent, request_reply=False, silent=True)
+ if i == groupchat.max_round - 1:
+ # the last round
+ break
+ try:
+ # select the next speaker
+ speaker = groupchat.select_speaker(speaker, self)
+ # let the speaker speak
+ reply = await speaker.a_generate_reply(sender=self)
+ except KeyboardInterrupt:
+ # let the admin agent speak if interrupted
+ if groupchat.admin_name in groupchat.agent_names:
+ # admin agent is one of the participants
+ speaker = groupchat.agent_by_name(groupchat.admin_name)
+ reply = await speaker.a_generate_reply(sender=self)
+ else:
+ # admin agent is not found in the participants
+ raise
+ if reply is None:
+ break
+ # The speaker sends the message without requesting a reply
+ await speaker.a_send(reply, self, request_reply=False)
+ message = self.last_message(speaker)
+ return True, None
diff --git a/autogen/retrieve_utils.py b/autogen/retrieve_utils.py
index b6edba7a1e40..bc4fdfb75976 100644
--- a/autogen/retrieve_utils.py
+++ b/autogen/retrieve_utils.py
@@ -1,9 +1,8 @@
-from typing import List, Union, Dict, Tuple, Callable
+from typing import List, Union, Callable
import os
import requests
from urllib.parse import urlparse
import glob
-import tiktoken
import chromadb
if chromadb.__version__ < "0.4.15":
diff --git a/autogen/version.py b/autogen/version.py
index 3cb7d95ef824..fb69db9cf4de 100644
--- a/autogen/version.py
+++ b/autogen/version.py
@@ -1 +1 @@
-__version__ = "0.1.13"
+__version__ = "0.1.14"
diff --git a/setup.py b/setup.py
index e034e0b99477..4ae981ddfde2 100644
--- a/setup.py
+++ b/setup.py
@@ -57,7 +57,7 @@
],
"blendsearch": ["flaml[blendsearch]"],
"mathchat": ["sympy", "pydantic==1.10.9", "wolframalpha"],
- "retrievechat": ["chromadb", "tiktoken", "sentence_transformers", "pypdf"],
+ "retrievechat": ["chromadb", "tiktoken", "sentence_transformers", "pypdf", "ipython"],
"teachable": ["chromadb"],
"gui":[
"void-terminal>=0.0.9",
diff --git a/test/agentchat/test_async_get_human_input.py b/test/agentchat/test_async_get_human_input.py
new file mode 100644
index 000000000000..cdc7ea2aa7e8
--- /dev/null
+++ b/test/agentchat/test_async_get_human_input.py
@@ -0,0 +1,35 @@
+import asyncio
+import autogen
+import pytest
+from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST
+
+
+@pytest.mark.asyncio
+async def test_async_get_human_input():
+ try:
+ import openai
+ except ImportError:
+ return
+ config_list = autogen.config_list_from_json(OAI_CONFIG_LIST, KEY_LOC)
+
+ # create an AssistantAgent instance named "assistant"
+ assistant = autogen.AssistantAgent(
+ name="assistant",
+ max_consecutive_auto_reply=2,
+ llm_config={"request_timeout": 600, "seed": 41, "config_list": config_list, "temperature": 0},
+ )
+
+ user_proxy = autogen.UserProxyAgent(name="user", human_input_mode="ALWAYS", code_execution_config=False)
+
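+    # Stub out the async human-input hook so ALWAYS mode runs unattended in the test.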
+ async def custom_a_get_human_input(prompt):
+ return "This is a test"
+
+ user_proxy.a_get_human_input = custom_a_get_human_input
+
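+    # Register the async termination/human-reply check so the a_initiate_chat path
+    # consults a_get_human_input instead of the blocking get_human_input.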
+ user_proxy.register_reply([autogen.Agent, None], autogen.ConversableAgent.a_check_termination_and_human_reply)
+
+ await user_proxy.a_initiate_chat(assistant, clear_history=True, message="Hello.")
+
+
+if __name__ == "__main__":
+    asyncio.run(test_async_get_human_input())
diff --git a/test/agentchat/test_retrievechat.py b/test/agentchat/test_retrievechat.py
index 99e395de5056..d71d146194ba 100644
--- a/test/agentchat/test_retrievechat.py
+++ b/test/agentchat/test_retrievechat.py
@@ -10,7 +10,6 @@
from autogen.agentchat.contrib.retrieve_user_proxy_agent import (
RetrieveUserProxyAgent,
)
- from autogen.retrieve_utils import create_vector_db_from_dir, query_vector_db
import chromadb
from chromadb.utils import embedding_functions as ef
@@ -61,6 +60,7 @@ def test_retrievechat():
"model": config_list[0]["model"],
"client": chromadb.PersistentClient(path="/tmp/chromadb"),
"embedding_function": sentence_transformer_ef,
+ "get_or_create": True,
},
)
@@ -72,26 +72,5 @@ def test_retrievechat():
print(conversations)
-@pytest.mark.skipif(
- sys.platform in ["darwin", "win32"] or skip_test,
- reason="do not run on MacOS or windows",
-)
-def test_retrieve_utils():
- client = chromadb.PersistentClient(path="/tmp/chromadb")
- create_vector_db_from_dir(dir_path="./website/docs", client=client, collection_name="autogen-docs")
- results = query_vector_db(
- query_texts=[
- "How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?",
- ],
- n_results=4,
- client=client,
- collection_name="autogen-docs",
- search_string="AutoGen",
- )
- print(results["ids"][0])
- assert len(results["ids"][0]) == 4
-
-
if __name__ == "__main__":
test_retrievechat()
- test_retrieve_utils()
diff --git a/test/oai/test_completion.py b/test/oai/test_completion.py
index f33533b7bf88..b6cb5c31b1c2 100644
--- a/test/oai/test_completion.py
+++ b/test/oai/test_completion.py
@@ -231,7 +231,7 @@ def test_humaneval(num_samples=1):
raise_on_ratelimit_or_timeout=False,
)
# assert response == -1
- config_list = autogen.config_list_openai_aoai(KEY_LOC, exclude="aoai")
+ config_list = autogen.config_list_openai_aoai(KEY_LOC)
# a minimal tuning example
config, _ = autogen.Completion.tune(
data=tune_data,
@@ -376,11 +376,11 @@ def test_math(num_samples=-1):
]
autogen.Completion.set_cache(seed)
- config_list = autogen.config_list_openai_aoai(KEY_LOC)[:2]
+ config_list = autogen.config_list_openai_aoai(KEY_LOC)
vanilla_config = {
- "model": "text-davinci-003",
+ "model": "text-ada-001",
"temperature": 1,
- "max_tokens": 2048,
+ "max_tokens": 1024,
"n": 1,
"prompt": prompts[0],
"stop": "###",
@@ -451,5 +451,5 @@ def my_average(results):
# test_chatcompletion()
# test_multi_model()
# test_nocontext()
- test_humaneval(1)
- # test_math(1)
+ # test_humaneval(1)
+ test_math(1)
diff --git a/test/test_function_call.py b/test/test_function_call.py
index 9b026ca3c1d1..a78ff131d82f 100644
--- a/test/test_function_call.py
+++ b/test/test_function_call.py
@@ -127,7 +127,68 @@ def get_number():
assert user.execute_function(func_call)[1]["content"] == "42"
+@pytest.mark.asyncio
+async def test_a_execute_function():
+ from autogen.agentchat import UserProxyAgent
+    import asyncio
+
+ # Create an async function
+ async def add_num(num_to_be_added):
+ given_num = 10
+        await asyncio.sleep(1)  # yield to the event loop instead of blocking it
+ return num_to_be_added + given_num
+
+ user = UserProxyAgent(name="test", function_map={"add_num": add_num})
+ correct_args = {"name": "add_num", "arguments": '{ "num_to_be_added": 5 }'}
+
+    # Assert the un-awaited coroutine's result does not match.
+ assert user.execute_function(func_call=correct_args)[1]["content"] != "15"
+    # Assert the awaited coroutine's result does match.
+ assert (await user.a_execute_function(func_call=correct_args))[1]["content"] == "15"
+
+ # function name called is wrong or doesn't exist
+ wrong_func_name = {"name": "subtract_num", "arguments": '{ "num_to_be_added": 5 }'}
+ assert "Error: Function" in (await user.a_execute_function(func_call=wrong_func_name))[1]["content"]
+
+ # arguments passed is not in correct json format
+ wrong_json_format = {
+ "name": "add_num",
+ "arguments": '{ "num_to_be_added": 5, given_num: 10 }',
+ } # should be "given_num" with quotes
+ assert (
+ "You argument should follow json format."
+ in (await user.a_execute_function(func_call=wrong_json_format))[1]["content"]
+ )
+
+ # function execution error with wrong arguments passed
+ wrong_args = {"name": "add_num", "arguments": '{ "num_to_be_added": 5, "given_num": 10 }'}
+ assert "Error: " in (await user.a_execute_function(func_call=wrong_args))[1]["content"]
+
+ # 2. test calling a class method
+ class AddNum:
+ def __init__(self, given_num):
+ self.given_num = given_num
+
+ def add(self, num_to_be_added):
+ self.given_num = num_to_be_added + self.given_num
+ return self.given_num
+
+ user = UserProxyAgent(name="test", function_map={"add_num": AddNum(given_num=10).add})
+ func_call = {"name": "add_num", "arguments": '{ "num_to_be_added": 5 }'}
+ assert (await user.a_execute_function(func_call=func_call))[1]["content"] == "15"
+ assert (await user.a_execute_function(func_call=func_call))[1]["content"] == "20"
+
+ # 3. test calling a function with no arguments
+ def get_number():
+ return 42
+
+ user = UserProxyAgent("user", function_map={"get_number": get_number})
+ func_call = {"name": "get_number", "arguments": "{}"}
+ assert (await user.a_execute_function(func_call))[1]["content"] == "42"
+
+
if __name__ == "__main__":
test_json_extraction()
test_execute_function()
+    import asyncio
+    asyncio.run(test_a_execute_function())
test_eval_math_responses()
diff --git a/test/test_retrieve_utils.py b/test/test_retrieve_utils.py
index db581b8798aa..0585b2a147d9 100644
--- a/test/test_retrieve_utils.py
+++ b/test/test_retrieve_utils.py
@@ -7,19 +7,15 @@
extract_text_from_pdf,
split_files_to_chunks,
get_files_from_dir,
- get_file_from_url,
is_url,
create_vector_db_from_dir,
query_vector_db,
- TEXT_FORMATS,
)
from autogen.token_count_utils import count_token
import os
-import sys
import pytest
import chromadb
-import tiktoken
test_dir = os.path.join(os.path.dirname(__file__), "test_files")
@@ -157,6 +153,7 @@ def custom_text_split_function(text):
client=client,
collection_name="mytestcollection",
custom_text_split_function=custom_text_split_function,
+ get_or_create=True,
)
results = query_vector_db(["autogen"], client=client, collection_name="mytestcollection", n_results=1)
assert (
@@ -164,6 +161,21 @@ def custom_text_split_function(text):
== "AutoGen is an advanced tool designed to assist developers in harnessing the capabilities\nof Large Language Models (LLMs) for various applications. The primary purpose o"
)
+ def test_retrieve_utils(self):
+ client = chromadb.PersistentClient(path="/tmp/chromadb")
+ create_vector_db_from_dir(dir_path="./website/docs", client=client, collection_name="autogen-docs")
+ results = query_vector_db(
+ query_texts=[
+ "How can I use AutoGen UserProxyAgent and AssistantAgent to do code generation?",
+ ],
+ n_results=4,
+ client=client,
+ collection_name="autogen-docs",
+ search_string="AutoGen",
+ )
+ print(results["ids"][0])
+ assert len(results["ids"][0]) == 4
+
if __name__ == "__main__":
pytest.main()
diff --git a/website/docs/FAQ.md b/website/docs/FAQ.md
index fb86919adeeb..6c87cac03d86 100644
--- a/website/docs/FAQ.md
+++ b/website/docs/FAQ.md
@@ -88,7 +88,6 @@ Otherwise, reply CONTINUE, or the reason why the task is not solved yet."""
If you have problems with agents running `pip install` or get errors similar to `Error while fetching server API version: ('Connection aborted.', FileNotFoundError(2, 'No such file or directory')`, you can choose **'python:3'** as the image, as shown in the code example above; that should solve the problem.
-
### Agents keep thanking each other when using `gpt-3.5-turbo`
When using `gpt-3.5-turbo` you may often encounter agents going into a "gratitude loop", meaning that when they complete a task they will begin congratulating and thanking each other in a continuous loop. This is a limitation in the performance of `gpt-3.5-turbo`, in contrast to `gpt-4`, which has no problem remembering instructions. This can hinder the experimentation experience when trying to test out your own use case with cheaper models.
@@ -108,3 +107,24 @@ prompt += termination_notice
```
**Note**: This workaround gets the job done around 90% of the time, but there are occurrences where the LLM still forgets to terminate the conversation.
+
+### ChromaDB fails in Codespaces because of an old version of sqlite3
+
+(from [issue #251](https://github.com/microsoft/autogen/issues/251))
+
+Code examples that use chromadb (such as retrieval) fail in Codespaces because the system `sqlite3` is older than the version Chroma requires.
+```
+>>> import chromadb
+Traceback (most recent call last):
+ File "", line 1, in
+ File "/home/vscode/.local/lib/python3.10/site-packages/chromadb/__init__.py", line 69, in
+ raise RuntimeError(
+RuntimeError: Your system has an unsupported version of sqlite3. Chroma requires sqlite3 >= 3.35.0.
+Please visit https://docs.trychroma.com/troubleshooting#sqlite to learn how to upgrade.
+```
+
+Workaround:
+1. `pip install pysqlite3-binary`
+2. `mkdir /home/vscode/.local/lib/python3.10/site-packages/google/colab`
+
+Explanation: Per [this gist](https://gist.github.com/defulmere/8b9695e415a44271061cc8e272f3c300?permalink_comment_id=4711478#gistcomment-4711478), linked from the official [chromadb docs](https://docs.trychroma.com/troubleshooting#sqlite), adding this folder triggers chromadb to use `pysqlite3` in place of the system `sqlite3`.
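+
+If creating that folder is not an option in your setup, the same gist also shows a direct override you can place at the very top of your entry-point script (a minimal sketch; assumes `pysqlite3-binary` from step 1 is installed):
+
+```python
+# Swap the stdlib sqlite3 for pysqlite3 before chromadb imports it.
+__import__("pysqlite3")
+import sys
+
+sys.modules["sqlite3"] = sys.modules.pop("pysqlite3")
+```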
diff --git a/website/docs/Installation.md b/website/docs/Installation.md
index 193e1383974a..2cacceda2c08 100644
--- a/website/docs/Installation.md
+++ b/website/docs/Installation.md
@@ -8,8 +8,13 @@ When not using a docker container, we recommend using a virtual environment to i
You can create a virtual environment with `venv` as below:
```bash
-python3 -m venv autogen
-source autogen/bin/activate
+python3 -m venv pyautogen
+source pyautogen/bin/activate
+```
+
+The following command will deactivate the current `venv` environment:
+```bash
+deactivate
```
### Option 2: conda
@@ -17,8 +22,13 @@ source autogen/bin/activate
Another option is `Conda`, which is better at resolving dependency conflicts than pip. You can install it by following [this doc](https://docs.conda.io/projects/conda/en/stable/user-guide/install/index.html),
and then create a virtual environment as below:
```bash
-conda create -n autogen python=3.10 # python 3.10 is recommended as it's stable and not too old
-conda activate autogen
+conda create -n pyautogen python=3.10 # python 3.10 is recommended as it's stable and not too old
+conda activate pyautogen
+```
+
+The following command will deactivate the current `conda` environment:
+```bash
+conda deactivate
```
Now, you're ready to install AutoGen in the virtual environment you've just created.