Merge branch 'main' into jerry/bump_version_0.6.38
Showing 19 changed files with 1,523 additions and 36 deletions.
New file: llama_index/llms/__init__.py (+13 lines; path inferred from the package's re-exports)
from llama_index.llms.base import ChatMessage, ChatResponse, ChatResponseGen
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.llms.langchain import LangChainLLM
from llama_index.llms.openai import OpenAI

__all__ = [
    "OpenAI",
    "LangChainLLM",
    "HuggingFaceLLM",
    "ChatMessage",
    "ChatResponse",
    "ChatResponseGen",
]
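For orientation, a minimal usage sketch of this public surface. The bare `OpenAI()` constructor call is an assumption, not part of this diff; it presumes the library's default model and an OPENAI_API_KEY in the environment:

from llama_index.llms import ChatMessage, OpenAI

llm = OpenAI()  # assumed: default model and OPENAI_API_KEY are configured
response = llm.chat([ChatMessage(role="user", content="Say hello.")])
print(response.message.content)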
New file: llama_index/llms/base.py (+106 lines; path matches the llama_index.llms.base imports in the files below)
from abc import ABC, abstractmethod
from enum import Enum
from typing import Any, Generator, Optional, Sequence

from pydantic import BaseModel, Field

from llama_index.constants import DEFAULT_CONTEXT_WINDOW, DEFAULT_NUM_OUTPUTS


class MessageRole(str, Enum):
    SYSTEM = "system"
    USER = "user"
    ASSISTANT = "assistant"
    FUNCTION = "function"


# ===== Generic Model Input - Chat =====
class ChatMessage(BaseModel):
    role: MessageRole = MessageRole.USER
    content: Optional[str] = ""
    additional_kwargs: dict = Field(default_factory=dict)
    name: Optional[str] = None

    def __str__(self) -> str:
        return f"{self.role.value}: {self.content}"


# ===== Generic Model Output - Chat =====
class ChatResponse(BaseModel):
    message: ChatMessage
    raw: Optional[dict] = None
    delta: Optional[str] = None

    def __str__(self) -> str:
        return str(self.message)


ChatResponseGen = Generator[ChatResponse, None, None]


# ===== Generic Model Output - Completion =====
class CompletionResponse(BaseModel):
    text: str
    additional_kwargs: dict = Field(default_factory=dict)
    raw: Optional[dict] = None
    delta: Optional[str] = None

    def __str__(self) -> str:
        return self.text


CompletionResponseGen = Generator[CompletionResponse, None, None]


class LLMMetadata(BaseModel):
    """LLM metadata."""

    context_window: int = DEFAULT_CONTEXT_WINDOW
    num_output: int = DEFAULT_NUM_OUTPUTS


class LLM(ABC):
    @property
    @abstractmethod
    def metadata(self) -> LLMMetadata:
        pass

    @abstractmethod
    def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        pass

    @abstractmethod
    def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        pass

    @abstractmethod
    def stream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        pass

    @abstractmethod
    def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
        pass

    # ===== Async Endpoints =====
    @abstractmethod
    async def achat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponse:
        pass

    @abstractmethod
    async def acomplete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        pass

    @abstractmethod
    async def astream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        pass

    @abstractmethod
    async def astream_complete(
        self, prompt: str, **kwargs: Any
    ) -> CompletionResponseGen:
        pass
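A hedged sketch of how a caller might drive this interface; `llm` stands in for any concrete implementation of the abstract class above, and `print_stream` is an illustrative helper, not part of the diff:

def print_stream(llm: LLM, prompt: str) -> None:
    # stream_complete yields CompletionResponse chunks: `delta` carries the
    # newly generated text, while `text` holds the accumulated output so far.
    for chunk in llm.stream_complete(prompt):
        print(chunk.delta or "", end="", flush=True)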
New file: llama_index/llms/custom.py (+54 lines; path assumed from the CustomLLM class it defines)
from typing import Any, Sequence

from llama_index.llms.base import (
    LLM,
    ChatMessage,
    ChatResponse,
    ChatResponseGen,
    CompletionResponse,
    CompletionResponseGen,
)
from llama_index.llms.generic_utils import (
    completion_to_chat_decorator,
    stream_completion_to_chat_decorator,
)


class CustomLLM(LLM):
    """Simple abstract base class for custom LLMs.

    Subclasses must implement the `__init__`, `complete`,
    `stream_complete`, and `metadata` methods.
    """

    def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        chat_fn = completion_to_chat_decorator(self.complete)
        return chat_fn(messages, **kwargs)

    def stream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        stream_chat_fn = stream_completion_to_chat_decorator(self.stream_complete)
        return stream_chat_fn(messages, **kwargs)

    async def achat(
        self,
        messages: Sequence[ChatMessage],
        **kwargs: Any,
    ) -> ChatResponse:
        return self.chat(messages, **kwargs)

    async def astream_chat(
        self,
        messages: Sequence[ChatMessage],
        **kwargs: Any,
    ) -> ChatResponseGen:
        return self.stream_chat(messages, **kwargs)

    async def acomplete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        return self.complete(prompt, **kwargs)

    async def astream_complete(
        self, prompt: str, **kwargs: Any
    ) -> CompletionResponseGen:
        return self.stream_complete(prompt, **kwargs)
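As a sketch of the intended extension point, here is a toy subclass that echoes its prompt back. Only `metadata`, `complete`, and `stream_complete` need to be written; `chat`, `stream_chat`, and the async variants fall out of `CustomLLM`. The `EchoLLM` name and the `llama_index.llms.custom` import path are illustrative assumptions:

from typing import Any

from llama_index.llms.base import (
    CompletionResponse,
    CompletionResponseGen,
    LLMMetadata,
)
from llama_index.llms.custom import CustomLLM  # module path assumed


class EchoLLM(CustomLLM):
    """Toy LLM for illustration: streams the prompt back word by word."""

    @property
    def metadata(self) -> LLMMetadata:
        # Library defaults for context window and output size.
        return LLMMetadata()

    def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
        return CompletionResponse(text=prompt)

    def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
        def gen() -> CompletionResponseGen:
            accumulated = ""
            for word in prompt.split():
                accumulated += word + " "
                yield CompletionResponse(text=accumulated, delta=word + " ")

        return gen()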
New file: llama_index/llms/generic_utils.py (+154 lines; path matches the llama_index.llms.generic_utils import above)
from typing import Any, Callable, Sequence

from llama_index.llms.base import (
    ChatMessage,
    ChatResponse,
    ChatResponseGen,
    CompletionResponse,
    CompletionResponseGen,
    MessageRole,
)


def messages_to_prompt(messages: Sequence[ChatMessage]) -> str:
    """Convert messages to a string prompt."""
    string_messages = []
    for message in messages:
        role = message.role
        content = message.content
        string_message = f"{role}: {content}"

        additional_kwargs = message.additional_kwargs
        if additional_kwargs:
            string_message += f"\n{additional_kwargs}"
        string_messages.append(string_message)

    string_messages.append(f"{MessageRole.ASSISTANT}: ")
    return "\n".join(string_messages)


def prompt_to_messages(prompt: str) -> Sequence[ChatMessage]:
    """Convert a string prompt to a sequence of messages."""
    return [ChatMessage(role=MessageRole.USER, content=prompt)]


def completion_response_to_chat_response(
    completion_response: CompletionResponse,
) -> ChatResponse:
    """Convert a completion response to a chat response."""
    return ChatResponse(
        message=ChatMessage(
            role=MessageRole.ASSISTANT,
            content=completion_response.text,
            additional_kwargs=completion_response.additional_kwargs,
        ),
        raw=completion_response.raw,
    )


def stream_completion_response_to_chat_response(
    completion_response_gen: CompletionResponseGen,
) -> ChatResponseGen:
    """Convert a stream of completion responses to a stream of chat responses."""

    def gen() -> ChatResponseGen:
        for response in completion_response_gen:
            yield ChatResponse(
                message=ChatMessage(
                    role=MessageRole.ASSISTANT,
                    content=response.text,
                    additional_kwargs=response.additional_kwargs,
                ),
                delta=response.delta,
                raw=response.raw,
            )

    return gen()


def chat_response_to_completion_response(
    chat_response: ChatResponse,
) -> CompletionResponse:
    """Convert a chat response to a completion response."""
    return CompletionResponse(
        text=chat_response.message.content or "",
        additional_kwargs=chat_response.message.additional_kwargs,
        raw=chat_response.raw,
    )


def stream_chat_response_to_completion_response(
    chat_response_gen: ChatResponseGen,
) -> CompletionResponseGen:
    """Convert a stream of chat responses to a stream of completion responses."""

    def gen() -> CompletionResponseGen:
        for response in chat_response_gen:
            yield CompletionResponse(
                text=response.message.content or "",
                additional_kwargs=response.message.additional_kwargs,
                delta=response.delta,
                raw=response.raw,
            )

    return gen()


def completion_to_chat_decorator(
    func: Callable[..., CompletionResponse]
) -> Callable[..., ChatResponse]:
    """Convert a completion function to a chat function."""

    def wrapper(messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
        # normalize input
        prompt = messages_to_prompt(messages)
        completion_response = func(prompt, **kwargs)
        # normalize output
        return completion_response_to_chat_response(completion_response)

    return wrapper


def stream_completion_to_chat_decorator(
    func: Callable[..., CompletionResponseGen]
) -> Callable[..., ChatResponseGen]:
    """Convert a stream completion function to a stream chat function."""

    def wrapper(messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponseGen:
        # normalize input
        prompt = messages_to_prompt(messages)
        completion_response = func(prompt, **kwargs)
        # normalize output
        return stream_completion_response_to_chat_response(completion_response)

    return wrapper


def chat_to_completion_decorator(
    func: Callable[..., ChatResponse]
) -> Callable[..., CompletionResponse]:
    """Convert a chat function to a completion function."""

    def wrapper(prompt: str, **kwargs: Any) -> CompletionResponse:
        # normalize input
        messages = prompt_to_messages(prompt)
        chat_response = func(messages, **kwargs)
        # normalize output
        return chat_response_to_completion_response(chat_response)

    return wrapper


def stream_chat_to_completion_decorator(
    func: Callable[..., ChatResponseGen]
) -> Callable[..., CompletionResponseGen]:
    """Convert a stream chat function to a stream completion function."""

    def wrapper(prompt: str, **kwargs: Any) -> CompletionResponseGen:
        # normalize input
        messages = prompt_to_messages(prompt)
        chat_response = func(messages, **kwargs)
        # normalize output
        return stream_chat_response_to_completion_response(chat_response)

    return wrapper
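To make the round trip concrete, a small hedged example lifting a stub completion function into the chat interface; `fake_complete` is hypothetical and exists only for this demonstration:

from typing import Any

from llama_index.llms.base import ChatMessage, CompletionResponse, MessageRole
from llama_index.llms.generic_utils import completion_to_chat_decorator


def fake_complete(prompt: str, **kwargs: Any) -> CompletionResponse:
    # Hypothetical stand-in for a real completion endpoint.
    return CompletionResponse(text="Hello!")


chat_fn = completion_to_chat_decorator(fake_complete)
response = chat_fn([ChatMessage(role=MessageRole.USER, content="Hi")])
assert response.message.role == MessageRole.ASSISTANT
assert response.message.content == "Hello!"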