Bump version to 0.3.0: Merge pull request #5 from atomiechen/dev
atomiechen authored Jul 28, 2023
2 parents 8991277 + 98acf0d commit b1a5b7c
Showing 12 changed files with 392 additions and 25 deletions.
18 changes: 18 additions & 0 deletions .gitignore
@@ -6,3 +6,21 @@ dist
*.env

venv
tmp*

# image files
*.png
*.jpg
*.jpeg
*.gif
*.svg
*.ico
*.pdf
# audio files
*.m4a
*.mp3
*.mp4
*.wav
*.flac
*.aac

44 changes: 41 additions & 3 deletions README.md
@@ -28,7 +28,9 @@ Example scripts are placed in [tests](./tests) folder.

## OpenAI API Request

This toolkit uses HTTP API request instead of OpenAI's official python package to support client-side `timeout` control:
### Timeout control

This toolkit supports client-side `timeout` control, which OpenAI's official python package does not support yet:

```python
from handyllm import OpenAIAPI
@@ -44,14 +46,20 @@ response = OpenAIAPI.chat(
print(response['choices'][0]['message']['content'])
```

### Authorization

API key and organization will be loaded from the environment variables `OPENAI_API_KEY` and `OPENAI_ORGANIZATION`, or you can set them manually:

```python
OpenAIAPI.api_key = 'sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
OpenAIAPI.organization = '......' # default: None
```

Stream response of `chat`/`completions` can be achieved using `steam` parameter:
Or, you can pass `api_key` and `organization` parameters in each API call.
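
For example, a minimal sketch of overriding the credentials for a single call (the key and model name below are placeholders):

```python
from handyllm import OpenAIAPI

# per-call credentials take precedence over OpenAIAPI.api_key / OpenAIAPI.organization
# and over the environment variables; the key below is a placeholder, not a real credential
response = OpenAIAPI.chat(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "please tell me a joke"}],
    api_key='sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
    # organization='org-xxxx',  # optional
)
print(response['choices'][0]['message']['content'])
```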

### Stream response

Stream response of `chat`/`completions`/`finetunes_list_events` can be achieved using the `stream` parameter:

```python
response = OpenAIAPI.chat(
@@ -71,10 +79,40 @@ for text in OpenAIAPI.stream_chat(response):
# print(chunk['choices'][0]['delta']['content'], end='')
```

### Supported APIs

- chat
- completions
- edits
- embeddings
- models_list
- models_retrieve
- moderations
- images_generations
- images_edits
- images_variations
- audio_transcriptions
- audio_translations
- files_list
- files_upload
- files_delete
- files_retrieve
- files_retrieve_content
- finetunes_create
- finetunes_list
- finetunes_retrieve
- finetunes_cancel
- finetunes_list_events
- finetunes_delete_model

Please refer to [OpenAI official API reference](https://platform.openai.com/docs/api-reference) for details.
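
All of these share the same calling convention as `chat`. A minimal sketch of two of the simpler endpoints (the model name and response fields follow the public OpenAI API and are only illustrative):

```python
from handyllm import OpenAIAPI

# assumes OPENAI_API_KEY is set in the environment

# list available models (GET /models)
models = OpenAIAPI.models_list()
print([item['id'] for item in models['data']])

# create embeddings (POST /embeddings)
response = OpenAIAPI.embeddings(
    model='text-embedding-ada-002',
    input='Hello, world!',
    # timeout=10,
)
print(len(response['data'][0]['embedding']))
```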



## Prompt

### Prompt Conversion

`PromptConverter` can convert this text file `prompt.txt` into a structured prompt for chat API calls:

```
@@ -109,7 +147,7 @@ chat = converter.rawfile2chat('prompt.txt')
new_chat = converter.chat_replace_variables(chat, {r'%misc%': 'Note: do not use any bad word.'})
```
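
The converted chat can then be sent directly to the chat API; for example, a minimal sketch (the model name is only an example):

```python
from handyllm import OpenAIAPI

response = OpenAIAPI.chat(
    model="gpt-3.5-turbo",
    messages=new_chat,
)
print(response['choices'][0]['message']['content'])
```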


### Substitute

`PromptConverter` can also substitute placeholder variables like `%output_format%` stored in text files to make multiple prompts modular. A substitute map `substitute.txt` looks like this:

2 changes: 1 addition & 1 deletion pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "HandyLLM"
version = "0.2.1"
version = "0.3.0"
authors = [
{ name="Atomie CHEN", email="atomic_cwh@163.com" },
]
172 changes: 151 additions & 21 deletions src/handyllm/openai_api.py
@@ -33,7 +33,7 @@ def _get_organization_from_env():
return os.environ.get('OPENAI_ORGANIZATION')

@staticmethod
def _api_request(url, api_key, organization=None, timeout=None, **kwargs):
def _api_request(url, api_key, organization=None, method='post', timeout=None, **kwargs):
if api_key is None:
raise Exception("OpenAI API key is not set")
if url is None:
@@ -50,20 +50,31 @@ def _api_request(url, api_key, organization=None, timeout=None, **kwargs):
log_strs.append(f"timeout: {timeout}")
module_logger.info('\n'.join(log_strs))

request_data = kwargs
headers = {
'Authorization': 'Bearer ' + api_key,
'Content-Type': 'application/json'
}
files = kwargs.pop('files', None)
stream = kwargs.get('stream', False)
headers = { 'Authorization': 'Bearer ' + api_key }
json_data = None
data = None
params = None
if organization is not None:
headers['OpenAI-Organization'] = organization

stream = kwargs.get('stream', False)
response = requests.post(
url,
headers=headers,
# data=json.dumps(request_data),
json=request_data,
if method == 'post':
if files is None:
headers['Content-Type'] = 'application/json'
json_data = kwargs
else: ## if files is not None, let requests handle the content type
data = kwargs
if method == 'get' and stream:
params = { "stream": "true" }

response = requests.request(
method,
url,
headers=headers,
data=data,
json=json_data,
files=files,
params=params,
stream=stream,
timeout=timeout,
)
@@ -105,18 +116,30 @@ def stream_completions(response):

@staticmethod
def api_request_endpoint(request_url, endpoint_manager=None, **kwargs):
specified_api_key = kwargs.pop('api_key', None)
specified_organization = kwargs.pop('organization', None)
if endpoint_manager != None:
# switch the endpoint and the key together on every request, so that the key always matches its endpoint
base_url, api_key, organization = endpoint_manager.get_endpoint()
else:
base_url = OpenAIAPI.base_url
api_key = OpenAIAPI.api_key if OpenAIAPI.api_key is not None else OpenAIAPI._get_key_from_env()
organization = OpenAIAPI.organization if OpenAIAPI.organization is not None else OpenAIAPI._get_organization_from_env()
if specified_api_key is not None:
api_key = specified_api_key
elif OpenAIAPI.api_key is not None:
api_key = OpenAIAPI.api_key
else:
api_key = OpenAIAPI._get_key_from_env()
if specified_organization is not None:
organization = specified_organization
elif OpenAIAPI.organization is not None:
organization = OpenAIAPI.organization
else:
organization = OpenAIAPI._get_organization_from_env()
url = base_url + request_url
return OpenAIAPI._api_request(url, api_key, organization=organization, **kwargs)

@staticmethod
def chat(timeout=None, endpoint_manager=None, logger=None, log_marks=[], **kwargs):
def chat(model, messages, timeout=None, endpoint_manager=None, logger=None, log_marks=[], **kwargs):
request_url = '/chat/completions'

if logger is not None and 'messages' in kwargs:
@@ -131,7 +154,7 @@ def chat(timeout=None, endpoint_manager=None, logger=None, log_marks=[], **kwarg

start_time = time.time()
try:
response = OpenAIAPI.api_request_endpoint(request_url, timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)
response = OpenAIAPI.api_request_endpoint(request_url, model=model, messages=messages, method='post', timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

if logger is not None:
end_time = time.time()
@@ -170,7 +193,7 @@ def wrapper(response):
return response

@staticmethod
def completions(timeout=None, endpoint_manager=None, logger=None, log_marks=[], **kwargs):
def completions(model, prompt, timeout=None, endpoint_manager=None, logger=None, log_marks=[], **kwargs):
request_url = '/completions'

if logger is not None and 'prompt' in kwargs:
@@ -185,7 +208,7 @@ def completions(timeout=None, endpoint_manager=None, logger=None, log_marks=[],

start_time = time.time()
try:
response = OpenAIAPI.api_request_endpoint(request_url, timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)
response = OpenAIAPI.api_request_endpoint(request_url, model=model, prompt=prompt, method='post', timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

if logger is not None:
end_time = time.time()
@@ -223,9 +246,116 @@ def wrapper(response):
return response

@staticmethod
def embeddings(timeout=None, endpoint_manager=None, **kwargs):
def edits(model, instruction, timeout=None, endpoint_manager=None, **kwargs):
request_url = '/edits'
return OpenAIAPI.api_request_endpoint(request_url, model=model, instruction=instruction, method='post', timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def embeddings(model, input, timeout=None, endpoint_manager=None, **kwargs):
request_url = '/embeddings'
return OpenAIAPI.api_request_endpoint(request_url, timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)
return OpenAIAPI.api_request_endpoint(request_url, model=model, input=input, method='post', timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def models_list(timeout=None, endpoint_manager=None, **kwargs):
request_url = '/models'
return OpenAIAPI.api_request_endpoint(request_url, method='get', timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def models_retrieve(model, timeout=None, endpoint_manager=None, **kwargs):
request_url = f'/models/{model}'
return OpenAIAPI.api_request_endpoint(request_url, method='get', timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def moderations(input, timeout=None, endpoint_manager=None, **kwargs):
request_url = '/moderations'
return OpenAIAPI.api_request_endpoint(request_url, input=input, method='post', timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def images_generations(prompt, timeout=None, endpoint_manager=None, **kwargs):
request_url = '/images/generations'
return OpenAIAPI.api_request_endpoint(request_url, prompt=prompt, method='post', timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def images_edits(image, prompt, mask=None, timeout=None, endpoint_manager=None, **kwargs):
request_url = '/images/edits'
files = { 'image': image }
if mask:
files['mask'] = mask
return OpenAIAPI.api_request_endpoint(request_url, prompt=prompt, method='post', files=files, timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def images_variations(image, timeout=None, endpoint_manager=None, **kwargs):
request_url = '/images/variations'
files = { 'image': image }
return OpenAIAPI.api_request_endpoint(request_url, method='post', files=files, timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def audio_transcriptions(file, model, timeout=None, endpoint_manager=None, **kwargs):
request_url = '/audio/transcriptions'
files = { 'file': file }
return OpenAIAPI.api_request_endpoint(request_url, model=model, method='post', files=files, timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def audio_translations(file, model, timeout=None, endpoint_manager=None, **kwargs):
request_url = '/audio/translations'
files = { 'file': file }
return OpenAIAPI.api_request_endpoint(request_url, model=model, method='post', files=files, timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def files_list(timeout=None, endpoint_manager=None, **kwargs):
request_url = '/files'
return OpenAIAPI.api_request_endpoint(request_url, method='get', timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def files_upload(file, purpose, timeout=None, endpoint_manager=None, **kwargs):
request_url = '/files'
files = { 'file': file }
return OpenAIAPI.api_request_endpoint(request_url, purpose=purpose, method='post', files=files, timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def files_delete(file_id, timeout=None, endpoint_manager=None, **kwargs):
request_url = f'/files/{file_id}'
return OpenAIAPI.api_request_endpoint(request_url, method='delete', timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def files_retrieve(file_id, timeout=None, endpoint_manager=None, **kwargs):
request_url = f'/files/{file_id}'
return OpenAIAPI.api_request_endpoint(request_url, method='get', timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def files_retrieve_content(file_id, timeout=None, endpoint_manager=None, **kwargs):
request_url = f'/files/{file_id}/content'
return OpenAIAPI.api_request_endpoint(request_url, method='get', timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def finetunes_create(training_file, timeout=None, endpoint_manager=None, **kwargs):
request_url = '/fine-tunes'
return OpenAIAPI.api_request_endpoint(request_url, training_file=training_file, method='post', timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def finetunes_list(timeout=None, endpoint_manager=None, **kwargs):
request_url = '/fine-tunes'
return OpenAIAPI.api_request_endpoint(request_url, method='get', timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def finetunes_retrieve(fine_tune_id, timeout=None, endpoint_manager=None, **kwargs):
request_url = f'/fine-tunes/{fine_tune_id}'
return OpenAIAPI.api_request_endpoint(request_url, method='get', timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def finetunes_cancel(fine_tune_id, timeout=None, endpoint_manager=None, **kwargs):
request_url = f'/fine-tunes/{fine_tune_id}/cancel'
return OpenAIAPI.api_request_endpoint(request_url, method='post', timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def finetunes_list_events(fine_tune_id, timeout=None, endpoint_manager=None, **kwargs):
request_url = f'/fine-tunes/{fine_tune_id}/events'
return OpenAIAPI.api_request_endpoint(request_url, method='get', timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)

@staticmethod
def finetunes_delete_model(model, timeout=None, endpoint_manager=None, **kwargs):
request_url = f'/models/{model}'
return OpenAIAPI.api_request_endpoint(request_url, method='delete', timeout=timeout, endpoint_manager=endpoint_manager, **kwargs)


if __name__ == '__main__':
25 changes: 25 additions & 0 deletions src/handyllm/utils.py
@@ -0,0 +1,25 @@
import requests
from urllib.parse import urlparse
import os
import time


def get_filename_from_url(download_url):
    # Parse the URL.
    parsed_url = urlparse(download_url)
    # The last part of the path is usually the filename.
    filename = os.path.basename(parsed_url.path)
    return filename

def download_binary(download_url, file_path=None, dir='.'):
    response = requests.get(download_url, allow_redirects=True)
    if file_path is None:
        filename = get_filename_from_url(download_url)
        if not filename:
            filename = 'download_' + time.strftime("%Y%m%d_%H%M%S")
        file_path = os.path.abspath(os.path.join(dir, filename))
    # Open the file in binary mode and write to it.
    with open(file_path, "wb") as file:
        file.write(response.content)
    return file_path
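
A possible usage sketch pairing this helper with the images API (the prompt, size, and response fields follow the public OpenAI images API and are only illustrative):

```python
from handyllm import OpenAIAPI
from handyllm.utils import download_binary

# assumes OPENAI_API_KEY is set in the environment;
# generate one image and save it into the current working directory
response = OpenAIAPI.images_generations(
    prompt='A cute baby sea otter',
    n=1,
    size='512x512',
)
saved_path = download_binary(response['data'][0]['url'], dir='.')
print(saved_path)
```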

17 changes: 17 additions & 0 deletions tests/test_audio.py
@@ -0,0 +1,17 @@
from handyllm import OpenAIAPI

from dotenv import load_dotenv, find_dotenv
# load env parameters from file named .env
# API key is read from environment variable OPENAI_API_KEY
# organization is read from environment variable OPENAI_ORGANIZATION
load_dotenv(find_dotenv())

file_path = 'hello.m4a'

with open(file_path, "rb") as file_bin:
    response = OpenAIAPI.audio_transcriptions(
        file=file_bin,
        model='whisper-1',
        # timeout=10,
    )
print(response['text'])
File renamed without changes.