Skip to content

Commit

Permalink
Bump version to 0.2.0: Merge pull request #3 from atomiechen/dev
Browse files Browse the repository at this point in the history
Bump version to 0.2.0
  • Loading branch information
atomiechen authored Jul 27, 2023
2 parents 2ae250e + 9ab88ec commit 2b742cf
Show file tree
Hide file tree
Showing 6 changed files with 178 additions and 12 deletions.
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -4,3 +4,5 @@ dist
*.egg-info

*.env

venv
44 changes: 40 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# HandyLLM

[![PyPI](https://img.shields.io/pypi/v/HandyLLM)](https://github.com/atomiechen/HandyLLM) ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/Handyllm)
[![GitHub](https://img.shields.io/badge/github-HandyLLM-blue?logo=github)](https://github.com/atomiechen/HandyLLM) [![PyPI](https://img.shields.io/pypi/v/HandyLLM?logo=pypi&logoColor=white)](https://pypi.org/project/HandyLLM/)

A handy toolkit for using LLMs.

Expand All @@ -12,6 +12,18 @@ A handy toolkit for using LLM.
pip3 install handyllm
```

or, install from the Github repo to get latest updates:

```shell
pip3 install git+https://github.com/atomiechen/handyllm.git
```



## Examples

Example scripts are placed in [tests](./tests) folder.



## OpenAI API Request
Expand All @@ -20,19 +32,43 @@ This toolkit uses HTTP API request instead of OpenAI's official python package t

```python
from handyllm import OpenAIAPI
OpenAIAPI.api_key = os.environ.get('OPENAI_API_KEY')
prompt = [{
"role": "user",
"content": "please tell me a joke"
}]
response = OpenAIAPI.chat(
model="gpt-3.5-turbo",
messages=prompt,
timeout=10
)
print(response['choices'][0]['message']['content'])
```

API key will be loaded using the environment variable `OPENAI_API_KEY`, or you can set manually:
API key and organization will be loaded using the environment variables `OPENAI_API_KEY` and `OPENAI_ORGANIZATION`, or you can set them manually:

```python
OpenAIAPI.api_key = 'sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
OpenAIAPI.organization = '......' # default to None
OpenAIAPI.organization = '......' # default: None
```

Stream response of `chat`/`completions` can be achieved using the `stream` parameter:

```python
response = OpenAIAPI.chat(
model="gpt-3.5-turbo",
messages=prompt,
timeout=10,
stream=True
)

# you can use this to stream the response text
for text in OpenAIAPI.stream_chat(response):
print(text, end='')

# or you can use this to get the whole response
# for chunk in response:
# if 'content' in chunk['choices'][0]['delta']:
# print(chunk['choices'][0]['delta']['content'], end='')
```


Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "HandyLLM"
version = "0.1.0"
version = "0.2.0"
authors = [
{ name="Atomie CHEN", email="atomic_cwh@163.com" },
]
Expand Down
59 changes: 53 additions & 6 deletions src/handyllm/openai_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,13 +12,28 @@
class OpenAIAPI:

base_url = "https://api.openai.com/v1"
api_key = os.environ.get('OPENAI_API_KEY')

# set this to your API key;
# or environment variable OPENAI_API_KEY will be used.
api_key = None

# set this to your organization ID;
# or environment variable OPENAI_ORGANIZATION will be used;
# can be None.
organization = None

converter = PromptConverter()

@staticmethod
def _get_key_from_env():
return os.environ.get('OPENAI_API_KEY')

@staticmethod
def _get_organization_from_env():
return os.environ.get('OPENAI_ORGANIZATION')

@staticmethod
def api_request(url, api_key, organization=None, timeout=None, **kwargs):
def _api_request(url, api_key, organization=None, timeout=None, **kwargs):
if api_key is None:
raise Exception("OpenAI API key is not set")
if url is None:
Expand Down Expand Up @@ -59,19 +74,51 @@ def api_request(url, api_key, organization=None, timeout=None, **kwargs):
err_msg = f"OpenAI API error ({url} {response.status_code} {response.reason}): {message}"
module_logger.error(err_msg)
raise Exception(err_msg)
return response.json()

stream = kwargs.get('stream', False)
if stream:
return OpenAIAPI._gen_stream_response(response)
else:
return response.json()

@staticmethod
def _gen_stream_response(response):
data_buffer = ''
for chunk in response.iter_content(decode_unicode=True):
data_buffer += chunk
while '\n' in data_buffer: # when '\n' is in the buffer, there is a complete message to process
line, data_buffer = data_buffer.split('\n', 1)
line = line.strip()
if line.startswith('data:'):
line = line[len('data:'):].strip()
if line == '[DONE]': # end the function when '[DONE]' message is received
return
else:
data = json.loads(line)
yield data

@staticmethod
def stream_chat(response):
for data in response:
if 'content' in data['choices'][0]['delta']:
yield data['choices'][0]['delta']['content']

@staticmethod
def stream_completions(response):
for data in response:
yield data['choices'][0]['text']

@staticmethod
def api_request_endpoint(request_url, endpoint_manager=None, **kwargs):
if endpoint_manager != None:
# switch the server and the key together, so that they always correspond to each other
base_url, api_key, organization = endpoint_manager.get_endpoint()
else:
base_url = OpenAIAPI.base_url
api_key = OpenAIAPI.api_key
organization = OpenAIAPI.organization
api_key = OpenAIAPI.api_key if OpenAIAPI.api_key is not None else OpenAIAPI._get_key_from_env()
organization = OpenAIAPI.organization if OpenAIAPI.organization is not None else OpenAIAPI._get_organization_from_env()
url = base_url + request_url
return OpenAIAPI.api_request(url, api_key, organization=organization, **kwargs)
return OpenAIAPI._api_request(url, api_key, organization=organization, **kwargs)

@staticmethod
def chat(timeout=None, endpoint_manager=None, logger=None, log_marks=[], **kwargs):
Expand Down
21 changes: 20 additions & 1 deletion tests/test_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,15 @@
from dotenv import load_dotenv, find_dotenv
# load env parameters from file named .env
# API key is read from environment variable OPENAI_API_KEY
# organization is read from environment variable OPENAI_ORGANIZATION
load_dotenv(find_dotenv())

## or you can set these parameters in code
OpenAIAPI.api_key = 'sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
# OpenAIAPI.api_key = 'sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
# OpenAIAPI.organization = None

# ----- EXAMPLE 1 -----

prompt = [{
"role": "user",
"content": "please tell me a joke"
Expand All @@ -24,3 +27,19 @@
timeout=10
)
print(response['choices'][0]['message']['content'])


print()
print("-----")


# ----- EXAMPLE 2 -----

response = OpenAIAPI.completions(
model="text-davinci-002",
prompt="count to 23 and stop: 1,2,3,",
timeout=10,
max_tokens=256,
echo=True, # Echo back the prompt in addition to the completion
)
print(response['choices'][0]['text'])
62 changes: 62 additions & 0 deletions tests/test_stream.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
from handyllm import OpenAIAPI

from dotenv import load_dotenv, find_dotenv

# Load environment parameters from a file named .env:
# the API key is read from OPENAI_API_KEY and the
# organization from OPENAI_ORGANIZATION.
load_dotenv(find_dotenv())

## alternatively, set these parameters directly in code
# OpenAIAPI.api_key = 'sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
# OpenAIAPI.organization = None

# ----- EXAMPLE 1 -----

chat_messages = [
    {
        "role": "user",
        "content": "please tell me a joke",
    }
]
response = OpenAIAPI.chat(
    model="gpt-3.5-turbo",
    messages=chat_messages,
    temperature=0.2,
    max_tokens=256,
    top_p=1.0,
    frequency_penalty=0.0,
    presence_penalty=0.0,
    timeout=10,
    stream=True,
)

# stream the response text piece by piece
for piece in OpenAIAPI.stream_chat(response):
    print(piece, end='')

# alternatively, consume the raw chunks yourself:
# for chunk in response:
#     if 'content' in chunk['choices'][0]['delta']:
#         print(chunk['choices'][0]['delta']['content'], end='')


print()
print("-----")


# ----- EXAMPLE 2 -----

response = OpenAIAPI.completions(
    model="text-davinci-002",
    prompt="count to 23 and stop: 1,2,3,",
    timeout=10,
    max_tokens=256,
    echo=True,  # echo back the prompt in addition to the completion
    stream=True,
)

# stream the response text piece by piece
for piece in OpenAIAPI.stream_completions(response):
    print(piece, end='')

# alternatively, consume the raw chunks yourself:
# for chunk in response:
#     print(chunk['choices'][0]['text'], end='')

0 comments on commit 2b742cf

Please sign in to comment.