From 79e823048c86739433e55df923c7b20fd0f2a3e6 Mon Sep 17 00:00:00 2001 From: Jacob Lee Date: Wed, 20 Sep 2023 19:50:55 -0700 Subject: [PATCH] Update quickstart and additional resources (#2690) --- .../docs/get_started/quickstart.mdx | 261 ++++++++++++------ docs/extras/additional_resources/scrimba.mdx | 16 ++ docs/snippets/get_started/introduction.mdx | 19 +- .../quickstart/agents_chat_models.mdx | 26 -- .../get_started/quickstart/agents_llms.mdx | 48 ---- .../quickstart/chains_chat_models.mdx | 34 --- .../get_started/quickstart/chains_llms.mdx | 27 -- .../get_started/quickstart/chat_model.mdx | 27 -- docs/snippets/get_started/quickstart/llm.mdx | 15 - .../quickstart/memory_chat_models.mdx | 50 ---- .../get_started/quickstart/memory_llms.mdx | 57 ---- .../prompt_templates_chat_models.mdx | 38 --- .../quickstart/prompt_templates_llms.mdx | 13 - 13 files changed, 209 insertions(+), 422 deletions(-) create mode 100644 docs/extras/additional_resources/scrimba.mdx delete mode 100644 docs/snippets/get_started/quickstart/agents_chat_models.mdx delete mode 100644 docs/snippets/get_started/quickstart/agents_llms.mdx delete mode 100644 docs/snippets/get_started/quickstart/chains_chat_models.mdx delete mode 100644 docs/snippets/get_started/quickstart/chains_llms.mdx delete mode 100644 docs/snippets/get_started/quickstart/chat_model.mdx delete mode 100644 docs/snippets/get_started/quickstart/llm.mdx delete mode 100644 docs/snippets/get_started/quickstart/memory_chat_models.mdx delete mode 100644 docs/snippets/get_started/quickstart/memory_llms.mdx delete mode 100644 docs/snippets/get_started/quickstart/prompt_templates_chat_models.mdx delete mode 100644 docs/snippets/get_started/quickstart/prompt_templates_llms.mdx diff --git a/docs/docs_skeleton/docs/get_started/quickstart.mdx b/docs/docs_skeleton/docs/get_started/quickstart.mdx index 883674ab6ca4..bbb229454c03 100644 --- a/docs/docs_skeleton/docs/get_started/quickstart.mdx +++ b/docs/docs_skeleton/docs/get_started/quickstart.mdx @@ -4,8 +4,6 @@ To install LangChain run: -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; import Install from "@snippets/get_started/quickstart/installation.mdx" @@ -24,129 +22,236 @@ import OpenAISetup from "@snippets/get_started/quickstart/openai_setup.mdx" Now we can start building our language model application. LangChain provides many modules that can be used to build language model applications. Modules can be used as stand-alones in simple applications and they can be combined for more complex use cases. +The most common and most important chain that LangChain helps create contains three things: + +- LLM: The language model is the core reasoning engine here. In order to work with LangChain, you need to understand the different types of language models and how to work with them. +- Prompt Templates: This provides instructions to the language model. This controls what the language model outputs, so understanding how to construct prompts and different prompting strategies is crucial. +- Output Parsers: These translate the raw response from the LLM to a more workable format, making it easy to use the output downstream. + +In this getting started guide we will cover those three components by themselves, and then go over how to combine all of them. Understanding these concepts will set you up well for being able to use and customize LangChain applications. Most LangChain applications allow you to configure the LLM and/or the prompt used, so knowing how to take advantage of this will be a big enabler. 
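+
+As a quick preview before we dig in, here is a minimal sketch of such a chain (each piece is covered in detail below). This is illustrative rather than canonical: it assumes the `OPENAI_API_KEY` environment variable is set, and uses the built-in `StringOutputParser` as a stand-in for whatever output parser your application needs.
+
+```typescript
+import { ChatOpenAI } from "langchain/chat_models/openai";
+import { PromptTemplate } from "langchain/prompts";
+import { StringOutputParser } from "langchain/schema/output_parser";
+
+// Prompt -> model -> output parser, composed into a single chain.
+const chain = PromptTemplate.fromTemplate(
+  "What is a good name for a company that makes {product}?"
+)
+  .pipe(new ChatOpenAI())
+  .pipe(new StringOutputParser());
+
+// Takes the prompt's input variables, returns the parsed model output.
+const name = await chain.invoke({ product: "colorful socks" });
+```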
+
 ## LLMs
-#### Get predictions from a language model
+There are two types of language models, which in LangChain are called:
-The basic building block of LangChain is the LLM, which takes in text and generates more text.
+- LLMs: this is a language model which takes a string as input and returns a string
+- ChatModels: this is a language model which takes a list of messages as input and returns a message
-As an example, suppose we're building an application that generates a company name based on a company description. In order to do this, we need to initialize an OpenAI model wrapper. In this case, since we want the outputs to be MORE random, we'll initialize our model with a HIGH temperature.
-import LLM from "@snippets/get_started/quickstart/llm.mdx"
+The input/output for LLMs is simple and easy to understand - a string. But what about ChatModels? The input there is a list of `ChatMessage`s, and the output is a single `ChatMessage`. A `ChatMessage` has two required components:
-
+- `content`: This is the content of the message.
+- `role`: This is the role of the entity the `ChatMessage` is coming from.
-## Chat models
+LangChain provides several objects to easily distinguish between different roles:
-Chat models are a variation on language models. While chat models use language models under the hood, the interface they expose is a bit different: rather than expose a "text in, text out" API, they expose an interface where "chat messages" are the inputs and outputs.
+- `HumanMessage`: A `ChatMessage` coming from a human/user.
+- `AIMessage`: A `ChatMessage` coming from an AI/assistant.
+- `SystemMessage`: A `ChatMessage` coming from the system.
+- `FunctionMessage`: A `ChatMessage` coming from a function call.
-You can get chat completions by passing one or more messages to the chat model. The response will be a message. The types of messages currently supported in LangChain are `AIMessage`, `HumanMessage`, `SystemMessage`, `FunctionMessage`, and `ChatMessage` -- `ChatMessage` takes in an arbitrary role parameter. Most of the time, you'll just be dealing with `HumanMessage`, `AIMessage`, and `SystemMessage`.
+If none of those roles sound right, there is also a `ChatMessage` class where you can specify the role manually. For more information on how to use these different messages most effectively, see our prompting guide.
-import ChatModel from "@snippets/get_started/quickstart/chat_model.mdx"
+LangChain provides a standard interface for both, but it's useful to understand this difference in order to construct prompts for a given language model. The standard interface that LangChain provides has two methods:
-
+- `predict`: Takes in a string, returns a string
+- `predictMessages`: Takes in a list of messages, returns a message.
-## Prompt templates
+Let's see how to work with these different types of models and these different types of inputs. First, let's import an LLM and a ChatModel and call `predict`.
-Most LLM applications do not pass user input directly into an LLM. Usually they will add the user input to a larger piece of text, called a prompt template, that provides additional context on the specific task at hand.
+```typescript
+import { OpenAI } from "langchain/llms/openai";
+import { ChatOpenAI } from "langchain/chat_models/openai";
-In the previous example, the text we passed to the model contained instructions to generate a company name. For our application, it'd be great if the user only had to provide the description of a company/product, without having to worry about giving the model instructions.
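+// Note: a higher temperature yields more random, "creative" completions;
+// both constructors read your key from process.env.OPENAI_API_KEY by default.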
-import PromptTemplateLLM from "@snippets/get_started/quickstart/prompt_templates_llms.mdx"
+const llm = new OpenAI({
+  temperature: 0.9,
+});
+
+const chatModel = new ChatOpenAI();
+
+const text = "What would be a good company name for a company that makes colorful socks?";
+
+const llmResult = await llm.predict(text);
+/*
+  "Feetful of Fun"
+*/
-import PromptTemplateChatModel from "@snippets/get_started/quickstart/prompt_templates_chat_models.mdx"
+const chatModelResult = await chatModel.predict(text);
+/*
+  "Socks O'Color"
+*/
+```
-
-
+The `OpenAI` and `ChatOpenAI` objects are basically just configuration objects. You can initialize them with parameters like temperature and others, and pass them around.
-With PromptTemplates this is easy! In this case our template would be very simple:
+Next, let's use the `predictMessages` method to run over a list of messages.
-
-
-
+```typescript
+import { HumanMessage } from "langchain/schema";
-Similar to LLMs, you can make use of templating by using a `MessagePromptTemplate`. You can build a `ChatPromptTemplate` from one or more `MessagePromptTemplate`s. You can use `ChatPromptTemplate`'s `format_messages` method to generate the formatted messages.
+const text = "What would be a good company name for a company that makes colorful socks?";
-Because this is generating a list of messages, it is slightly more complex than the normal prompt template which is generating only a string. Please see the detailed guides on prompts to understand more options available to you here.
+const messages = [new HumanMessage({ content: text })];
+
+const llmResult = await llm.predictMessages(messages);
+/*
+  AIMessage {
+    content: "Feetful of Fun"
+  }
+*/
+
+const chatModelResult = await chatModel.predictMessages(messages);
+/*
+  AIMessage {
+    content: "Socks O'Color"
+  }
+*/
+```
+
+For both these methods, you can also pass in parameters as a second argument. For example, you could pass in `temperature: 0` to override the temperature the object was configured with. Whatever values are passed in at runtime will always override what the object was configured with.
+
+## Prompt templates
+
+Most LLM applications do not pass user input directly into an LLM. Usually they will add the user input to a larger piece of text, called a prompt template, that provides additional context on the specific task at hand.
+
+In the previous example, the text we passed to the model contained instructions to generate a company name. For our application, it'd be great if the user only had to provide the description of a company/product, without having to worry about giving the model instructions.
-
-
-
+PromptTemplates help with exactly this! They bundle up all the logic for going from user input into a fully formatted prompt. This can start off very simple - for example, a prompt to produce the above string would just be:
-## Chains
+```typescript
+import { PromptTemplate } from "langchain/prompts";
-Now that we've got a model and a prompt template, we'll want to combine the two. Chains give us a way to link (or chain) together multiple primitives, like models, prompts, and other chains.
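+// `fromTemplate` infers the input variables (here, `product`) from the
+// curly-brace placeholders in the template string.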
+const prompt = PromptTemplate.fromTemplate("What is a good name for a company that makes {product}?");
-import ChainLLM from "@snippets/get_started/quickstart/chains_llms.mdx"
-import ChainChatModel from "@snippets/get_started/quickstart/chains_chat_models.mdx"
+const formattedPrompt = await prompt.format({
+  product: "colorful socks",
+});
+/*
+  "What is a good name for a company that makes colorful socks?"
+*/
+```
-
-
+There are several advantages to using these over raw string formatting. You can "partial" out variables - e.g. you can format only some of the variables at a time. You can compose them together, easily combining different templates into a single prompt. For explanations of these functionalities, see the [section on prompts](/docs/modules/model_io/prompts) for more detail.
-The simplest and most common type of chain is an LLMChain, which passes an input first to a PromptTemplate and then to an LLM. We can construct an LLM chain from our existing model and prompt template.
+PromptTemplates can also be used to produce a list of messages. In this case, the prompt contains not only information about the content, but also about each message (its role, its position in the list, etc.). Most often, a ChatPromptTemplate is a list of ChatMessageTemplates. Each ChatMessageTemplate contains instructions for how to format that ChatMessage - its role, and then also its content. Let's take a look at this below:
-
+```typescript
+import { ChatPromptTemplate } from "langchain/prompts";
-There we go, our first chain! Understanding how this simple chain works will set you up well for working with more complex chains.
+const template = "You are a helpful assistant that translates {input_language} to {output_language}.";
+const humanTemplate = "{text}";
-
-
+const chatPrompt = ChatPromptTemplate.fromMessages([
+  ["system", template],
+  ["human", humanTemplate],
+]);
-The `LLMChain` can be used with chat models as well:
+const formattedChatPrompt = await chatPrompt.formatMessages({
+  input_language: "English",
+  output_language: "French",
+  text: "I love programming.",
+});
-
-
-
+/*
+  [
+    SystemMessage {
+      content: 'You are a helpful assistant that translates English to French.'
+    },
+    HumanMessage { content: 'I love programming.' }
+  ]
+*/
+```
-## Agents
+ChatPromptTemplates can also be constructed in other ways - see the [section on prompts](/docs/modules/model_io/prompts) for more detail.
-import AgentLLM from "@snippets/get_started/quickstart/agents_llms.mdx"
-import AgentChatModel from "@snippets/get_started/quickstart/agents_chat_models.mdx"
+## Output parsers
-Our first chain ran a pre-determined sequence of steps. To handle complex workflows, we need to be able to dynamically choose actions based on inputs.
+OutputParsers convert the raw output of an LLM into a format that can be used downstream. There are a few main types of OutputParsers, including ones that:
-Agents do just this: they use a language model to determine which actions to take and in what order. Agents are given access to tools, and they repeatedly choose a tool, run the tool, and observe the output until they come up with a final answer.
+- Convert text from an LLM -> structured information (e.g. JSON) - see the sketch below
+- Convert a ChatMessage into just a string
+- Convert the extra information returned from a call besides the message (like OpenAI function invocation) into a string.
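+
+As a rough sketch of the first case, LangChain also ships some ready-made parsers. The example below uses `StructuredOutputParser` from `langchain/output_parsers`; treat the exact shape as illustrative:
+
+```typescript
+import { StructuredOutputParser } from "langchain/output_parsers";
+
+// Describe the JSON fields we want the model to produce.
+const parser = StructuredOutputParser.fromNamesAndDescriptions({
+  name: "the name of the company",
+});
+
+// Instructions that can be injected into a prompt so the model
+// knows to reply with matching JSON.
+console.log(parser.getFormatInstructions());
+```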
-To load an agent, you need to choose a(n):
-- LLM/Chat model: The language model powering the agent.
-- Tool(s): A function that performs a specific duty. This can be things like: Google Search, Database lookup, Python REPL, other chains. For a list of predefined tools and their specifications, see the [Tools documentation](/docs/modules/agents/tools/).
-- Agent name: A string that references a supported agent class. An agent class is largely parameterized by the prompt the language model uses to determine which action to take. Because this notebook focuses on the simplest, highest level API, this only covers using the standard supported agents. If you want to implement a custom agent, see [here](/docs/modules/agents). For a list of supported agents and their specifications, see [here](/docs/modules/agents/agent_types/).
+For more information, see the [section on output parsers](/docs/modules/model_io/output_parsers).
-For this example, we'll be using SerpAPI to query a search engine.
+In this getting started guide, we will write our own output parser - one that converts a comma-separated list into a list.
-You'll need to set the `SERPAPI_API_KEY` environment variable.
+```typescript
+import { BaseOutputParser } from "langchain/schema/output_parser";
-
-
-
-
-
+/**
+ * Parse the output of an LLM call to a comma-separated list.
+ */
+class CommaSeparatedListOutputParser extends BaseOutputParser<string[]> {
+  async parse(text: string): Promise<string[]> {
+    return text.split(",").map((item) => item.trim());
+  }
+}
-Agents can also be used with chat models. There are a few varieties, but if using OpenAI and a functions-capable model, you can use `openai-functions` as the agent type.
+const parser = new CommaSeparatedListOutputParser();
-
-
-
+const result = await parser.parse("hi, bye");
+/*
+  ['hi', 'bye']
+*/
+```
-## Memory
+## PromptTemplate + LLM + OutputParser
-The chains and agents we've looked at so far have been stateless, but for many applications it's necessary to reference past interactions. This is clearly the case with a chatbot for example, where you want it to understand new messages in the context of past messages.
+We can now combine all these into one chain. This chain will take input variables, pass those to a prompt template to create a prompt, pass the prompt to a language model, and then pass the output through an (optional) output parser. This is a convenient way to bundle up a modular piece of logic. Let's see it in action!
-The Memory module gives you a way to maintain application state. The base Memory interface is simple: it lets you update state given the latest run inputs and outputs and it lets you modify (or contextualize) the next input using the stored state.
+```typescript
+import { ChatOpenAI } from "langchain/chat_models/openai";
+import { ChatPromptTemplate } from "langchain/prompts";
+import { BaseOutputParser } from "langchain/schema/output_parser";
-There are a number of built-in memory systems. The simplest of these is a buffer memory which just prepends the last few inputs/outputs to the current input - we will use this in the example below.
+/**
+ * Parse the output of an LLM call to a comma-separated list.
+ */
+class CommaSeparatedListOutputParser extends BaseOutputParser<string[]> {
+  async parse(text: string): Promise<string[]> {
+    return text.split(",").map((item) => item.trim());
+  }
+}
-import MemoryLLM from "@snippets/get_started/quickstart/memory_llms.mdx"
-import MemoryChatModel from "@snippets/get_started/quickstart/memory_chat_models.mdx"
+const template = `You are a helpful assistant who generates comma separated lists.
+A user will pass in a category, and you should generate 5 objects in that category in a comma separated list. +ONLY return a comma separated list, and nothing more.`; - - +const humanTemplate = "{text}"; - - - +/** + * Chat prompt for generating comma-separated lists. It combines the system + * template and the human template. + */ +const chatPrompt = ChatPromptTemplate.fromMessages( + [ + ["system", template], + ["human", humanTemplate], + ] +); -You can use Memory with chains and agents initialized with chat models. The main difference between this and Memory for LLMs is that rather than trying to condense all previous messages into a string, we can keep them as their own unique memory object. +const model = new ChatOpenAI({}); +const parser = new CommaSeparatedListOutputParser(); - +const chain = chatPrompt.pipe(model).pipe(parser); + +const result = await chain.invoke({ + text: "colors", +}); + +/* + ["red", "blue", "green", "yellow", "orange"] +*/ +``` + +Note that we are using the `.pipe()` method to join these components together. This `.pipe()` method is part of the LangChain Expression Language. To learn more about this syntax, read the [documentation here](/docs/expression_language). + +## Next steps + +And that's it for the quickstart! We've now gone over how to create the core building block of LangChain applications. There is a lot more nuance in all these components (LLMs, prompts, output parsers) and a lot more different components to learn about as well. To continue on your journey: - - +- [Dive deeper](/docs/modules/model_io) into LLMs, prompts, and output parsers +- Learn the other [key components](/docs/modules) +- Read up on [LangChain Expression Language](/docs/expression_language) to learn how to chain these components together +- Check out our [helpful guides](/docs/guides) for detailed walkthroughs on particular topics +- Explore [end-to-end use cases](/docs/use_cases/) diff --git a/docs/extras/additional_resources/scrimba.mdx b/docs/extras/additional_resources/scrimba.mdx new file mode 100644 index 000000000000..f8d1b7228eaf --- /dev/null +++ b/docs/extras/additional_resources/scrimba.mdx @@ -0,0 +1,16 @@ +# Scrimba interactive guides + +[Scrimba](https://scrimba.com) is a code-learning platform that allows you to interactively edit and run +code while watching a video walkthrough. + +We've partnered with Scrimba on course materials (called "scrims") that teach the fundamentals of building with LangChain.js - +check them out below, and check back for more as they become available! + +## LangChain Expression Language (LCEL) + +- [The basics (PromptTemplate + LLM)](https://scrimba.com/scrim/c6rD6Nt9) + +## Deeper dives + +- [Setting up a new `PromptTemplate`](https://scrimba.com/scrim/cbGwRwuV) +- [Setting up `ChatOpenAI` parameters](https://scrimba.com/scrim/cEgbBBUw) diff --git a/docs/snippets/get_started/introduction.mdx b/docs/snippets/get_started/introduction.mdx index b9a7602ff426..0f6b8db354da 100644 --- a/docs/snippets/get_started/introduction.mdx +++ b/docs/snippets/get_started/introduction.mdx @@ -1,6 +1,6 @@ -**LangChain** is a framework for developing applications powered by language models. It enables applications that are: -- **Data-aware**: connect a language model to other sources of data -- **Agentic**: allow a language model to interact with its environment +**LangChain** is a framework for developing applications powered by language models. 
It enables applications that:
+- **Are context-aware**: connect a language model to other sources of context (prompt instructions, few-shot examples, content to ground its response in)
+- **Reason**: rely on a language model to reason (about how to answer based on provided context, what actions to take, etc.)
 
 The main value props of LangChain are:
 1. **Components**: abstractions for working with language models, along with a collection of implementations for each abstraction. Components are modular and easy-to-use, whether you are using the rest of the LangChain framework or not
@@ -22,7 +22,7 @@ LangChain provides standard, extendable interfaces and external integrations for
 #### [Model I/O](/docs/modules/model_io/)
 Interface with language models
-#### [Data connection](/docs/modules/data_connection/)
+#### [Retrieval](/docs/modules/data_connection/)
 Interface with application-specific data
 #### [Chains](/docs/modules/chains/)
 Construct sequences of calls
@@ -41,10 +41,11 @@ Walkthroughs and best-practices for common end-to-end use cases, like:
 - [Analyzing structured data](/docs/use_cases/tabular)
 - and much more...
 
-### [Additional resources](/docs/additional_resources/)
-Our community is full of prolific developers, creative builders, and fantastic teachers. Check out the [Gallery](https://github.com/kyrolabs/awesome-langchain) for a list of awesome LangChain projects, compiled by the folks at [KyroLabs](https://kyrolabs.com).
-

-### Support
-
+### [Guides](/docs/guides/) +Learn best practices for developing with LangChain. -Join us on [GitHub](https://github.com/hwchase17/langchainjs) or [Discord](https://discord.gg/6adMQxSpJS) to ask questions, share feedback, meet other developers building with LangChain, and dream about the future of LLM’s. +### [Additional resources](/docs/additional_resources/) +Our community is full of prolific developers, creative builders, and fantastic teachers. Check out [Scrimba](/docs/additional_resources/scrimba) for a series of interactive guides on how to get started with various concepts, and [Gallery](https://github.com/kyrolabs/awesome-langchain) for a list of awesome LangChain projects, compiled by the folks at [KyroLabs](https://kyrolabs.com). +### [Community](/docs/community) +Head to the [Community navigator](/docs/community) to find places to ask questions, share feedback, meet other developers, and dream about the future of LLM’s. diff --git a/docs/snippets/get_started/quickstart/agents_chat_models.mdx b/docs/snippets/get_started/quickstart/agents_chat_models.mdx deleted file mode 100644 index fbf9687c1b10..000000000000 --- a/docs/snippets/get_started/quickstart/agents_chat_models.mdx +++ /dev/null @@ -1,26 +0,0 @@ -```typescript -import { initializeAgentExecutorWithOptions } from "langchain/agents"; -import { ChatOpenAI } from "langchain/chat_models/openai"; -import { SerpAPI } from "langchain/tools"; -import { Calculator } from "langchain/tools/calculator"; - -const executor = await initializeAgentExecutorWithOptions( - [new Calculator(), new SerpAPI()], - new ChatOpenAI({ modelName: "gpt-4-0613", temperature: 0 }), - { - agentType: "openai-functions", - verbose: true, - } -); - -const result = await executor.run("What is the temperature in New York?"); -``` - -```typescript -/* - { - "output": "The current temperature in New York is 89°F, but it feels like 92°F. Please be cautious as the heat can lead to dehydration or heat stroke." - } -*/ -``` - diff --git a/docs/snippets/get_started/quickstart/agents_llms.mdx b/docs/snippets/get_started/quickstart/agents_llms.mdx deleted file mode 100644 index c09f9be681e8..000000000000 --- a/docs/snippets/get_started/quickstart/agents_llms.mdx +++ /dev/null @@ -1,48 +0,0 @@ -```typescript -import { initializeAgentExecutorWithOptions } from "langchain/agents"; -import { OpenAI } from "langchain/llms/openai"; -import { SerpAPI } from "langchain/tools"; -import { Calculator } from "langchain/tools/calculator"; - -const model = new OpenAI({ temperature: 0 }); -const tools = [ - new SerpAPI(process.env.SERPAPI_API_KEY, { - location: "Austin,Texas,United States", - hl: "en", - gl: "us", - }), - new Calculator(), -]; - -const executor = await initializeAgentExecutorWithOptions(tools, model, { - agentType: "zero-shot-react-description", - verbose: true, -}); - -const input = "What was the high temperature in SF yesterday in Fahrenheit? What is that number raised to the .023 power?"; - -const result = await executor.call({ - input, -}); -``` -```console -> Entering new AgentExecutor chain... - -Thought: I need to find the temperature first, then use the calculator to raise it to the .023 power. -Action: Search -Action Input: "High temperature in SF yesterday" -Observation: San Francisco Temperature Yesterday. Maximum temperature yesterday: 57 °F (at 1:56 pm) Minimum temperature yesterday: 49 °F (at 1:56 am) Average temperature ... - -Thought: I now have the temperature, so I can use the calculator to raise it to the .023 power. 
-Action: Calculator -Action Input: 57^.023 -Observation: Answer: 1.0974509573251117 - -Thought: I now know the final answer -Final Answer: 1.0974509573251117. - -> Finished chain. -``` -```typescript -// { output: "1.0974509573251117" } -``` diff --git a/docs/snippets/get_started/quickstart/chains_chat_models.mdx b/docs/snippets/get_started/quickstart/chains_chat_models.mdx deleted file mode 100644 index 76af37956023..000000000000 --- a/docs/snippets/get_started/quickstart/chains_chat_models.mdx +++ /dev/null @@ -1,34 +0,0 @@ -```typescript -import { ChatOpenAI } from "langchain/chat_models/openai"; -import { LLMChain } from "langchain/chains"; -import { - ChatPromptTemplate, - SystemMessagePromptTemplate, - HumanMessagePromptTemplate -} from "langchain/prompts"; - -const template = "You are a helpful assistant that translates {input_language} to {output_language}."; -const systemMessagePrompt = SystemMessagePromptTemplate.fromTemplate(template); -const humanTemplate = "{text}"; -const humanMessagePrompt = HumanMessagePromptTemplate.fromTemplate(humanTemplate); - -const chatPrompt = ChatPromptTemplate.fromMessages([systemMessagePrompt, humanMessagePrompt]); - -const chat = new ChatOpenAI({ - temperature: 0, -}); - -const chain = new LLMChain({ - llm: chat, - prompt: chatPrompt, -}); - -const result = await chain.call({ - input_language: "English", - output_language: "French", - text: "I love programming", -}); -``` -```typescript -// { text: "J'adore programmer" } -``` diff --git a/docs/snippets/get_started/quickstart/chains_llms.mdx b/docs/snippets/get_started/quickstart/chains_llms.mdx deleted file mode 100644 index 1fb4531c5e72..000000000000 --- a/docs/snippets/get_started/quickstart/chains_llms.mdx +++ /dev/null @@ -1,27 +0,0 @@ -Using this we can replace - -```typescript -const result = await llm.predict("What would be a good company name for a company that makes colorful socks?"); -``` - -with - -```typescript -import { OpenAI } from "langchain/llms/openai"; -import { LLMChain } from "langchain/chains"; -import { PromptTemplate } from "langchain/prompts"; - -const llm = new OpenAI({}); -const prompt = PromptTemplate.fromTemplate("What is a good name for a company that makes {product}?"); - -const chain = new LLMChain({ - llm, - prompt -}); - -// Run is a convenience method for chains with prompts that require one input and one output. -const result = await chain.run("colorful socks"); -``` -```typescript -"Feetful of Fun" -``` diff --git a/docs/snippets/get_started/quickstart/chat_model.mdx b/docs/snippets/get_started/quickstart/chat_model.mdx deleted file mode 100644 index 6f3a487970bb..000000000000 --- a/docs/snippets/get_started/quickstart/chat_model.mdx +++ /dev/null @@ -1,27 +0,0 @@ -```typescript -import { ChatOpenAI } from "langchain/chat_models/openai"; -import { HumanMessage, ChatMessage, SystemMessage } from "langchain/schema"; - -const chat = new ChatOpenAI({ - temperature: 0 -}); - -const result = await chat.predictMessages([ - new HumanMessage("Translate this sentence from English to French. I love programming.") -]); - -/* - AIMessage { - content: "J'adore la programmation." - } -*/ -``` - -It is useful to understand how chat models are different from a normal LLM, but it can often be handy to just be able to treat them the same. -LangChain makes that easy by also exposing an interface through which you can interact with a chat model as you would a normal LLM. -You can access this through the `predict` interface. 
- -```typescript -const result = await chat.predict("Translate this sentence from English to French. I love programming.") -// "J'adore la programmation." -``` diff --git a/docs/snippets/get_started/quickstart/llm.mdx b/docs/snippets/get_started/quickstart/llm.mdx deleted file mode 100644 index a11fd9ba34eb..000000000000 --- a/docs/snippets/get_started/quickstart/llm.mdx +++ /dev/null @@ -1,15 +0,0 @@ -```typescript -import { OpenAI } from "langchain/llms/openai"; - -const llm = new OpenAI({ - temperature: 0.9, -}); -``` - -And now we can pass in text and get predictions! - -```typescript -const result = await llm.predict("What would be a good company name for a company that makes colorful socks?"); -// "Feetful of Fun" -``` - diff --git a/docs/snippets/get_started/quickstart/memory_chat_models.mdx b/docs/snippets/get_started/quickstart/memory_chat_models.mdx deleted file mode 100644 index 9a0407429723..000000000000 --- a/docs/snippets/get_started/quickstart/memory_chat_models.mdx +++ /dev/null @@ -1,50 +0,0 @@ -```typescript -import { ConversationChain } from "langchain/chains"; -import { ChatOpenAI } from "langchain/chat_models/openai"; -import { - ChatPromptTemplate, - HumanMessagePromptTemplate, - SystemMessagePromptTemplate, - MessagesPlaceholder, -} from "langchain/prompts"; -import { BufferMemory } from "langchain/memory"; - -const chat = new ChatOpenAI({ temperature: 0 }); - -const chatPrompt = ChatPromptTemplate.fromMessages([ - SystemMessagePromptTemplate.fromTemplate( - "The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know." - ), - new MessagesPlaceholder("history"), - HumanMessagePromptTemplate.fromTemplate("{input}"), -]); - -// Return the current conversation directly as messages and insert them into the MessagesPlaceholder in the above prompt. -const memory = new BufferMemory({ - returnMessages: true, - memoryKey: "history" -}); - -const chain = new ConversationChain({ - memory, - prompt: chatPrompt, - llm: chat, - verbose: true, -}); - -const res = await chain.call({ - input: "My name is Jim.", -}); -``` -```console -Hello Jim! It's nice to meet you. How can I assist you today? -``` - -```typescript -const res2 = await chain.call({ - input: "What is my name?", -}); -``` -```console -Your name is Jim. You mentioned it at the beginning of our conversation. Is there anything specific you would like to know or discuss, Jim? -``` diff --git a/docs/snippets/get_started/quickstart/memory_llms.mdx b/docs/snippets/get_started/quickstart/memory_llms.mdx deleted file mode 100644 index 820894061e06..000000000000 --- a/docs/snippets/get_started/quickstart/memory_llms.mdx +++ /dev/null @@ -1,57 +0,0 @@ -```typescript -import { OpenAI } from "langchain/llms/openai"; -import { BufferMemory } from "langchain/memory"; -import { ConversationChain } from "langchain/chains"; - -const model = new OpenAI({}); -const memory = new BufferMemory(); -const chain = new ConversationChain({ - llm: model, - memory, - verbose: true, -}); -const res1 = await chain.call({ input: "Hi! I'm Jim." }); -``` - -here's what's going on under the hood - -```console -> Entering new chain... -Prompt after formatting: -The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. 
- -Current conversation: - -Human: Hi there! -AI: - -> Finished chain. - ->> 'Hello! How are you today?' -``` - -Now if we run the chain again - -```typescript -const res2 = await chain.call({ input: "What's my name?" }); -``` - -we'll see that the full prompt that's passed to the model contains the input and output of our first interaction, along with our latest input - -```console -> Entering new chain... -Prompt after formatting: -The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. - -Current conversation: - -Human: Hi there! -AI: Hello! How are you today? -Human: I'm doing well! Just having a conversation with an AI. -AI: - -> Finished chain. - ->> "Your name is Jim." -``` - diff --git a/docs/snippets/get_started/quickstart/prompt_templates_chat_models.mdx b/docs/snippets/get_started/quickstart/prompt_templates_chat_models.mdx deleted file mode 100644 index 71e36e3c9845..000000000000 --- a/docs/snippets/get_started/quickstart/prompt_templates_chat_models.mdx +++ /dev/null @@ -1,38 +0,0 @@ -```typescript -import { - ChatPromptTemplate, - SystemMessagePromptTemplate, - HumanMessagePromptTemplate -} from "langchain/prompts"; - -const template = "You are a helpful assistant that translates {input_language} to {output_language}."; -const systemMessagePrompt = SystemMessagePromptTemplate.fromTemplate(template); -const humanTemplate = "{text}"; -const humanMessagePrompt = HumanMessagePromptTemplate.fromTemplate(humanTemplate); - -// You can also pass ["{role}", "{template}"] tuples into the `.fromMessages()` method -// and they will be automatically converted into message prompts. -// const systemMessagePrompt = ["system", template]; -// const humanMessagePrompt = ["user", humanTemplate]; - -const chatPrompt = ChatPromptTemplate.fromMessages([systemMessagePrompt, humanMessagePrompt]); - -const formattedPrompt = await chatPrompt.formatMessages({ - input_language: "English", - output_language: "French", - text: "I love programming." -}); -``` - -```typescript -/* - [ - SystemMessage { - content: 'You are a helpful assistant that translates English to French.' - }, - HumanMessage { - content: 'I love programming.' - } - ] -*/ -``` diff --git a/docs/snippets/get_started/quickstart/prompt_templates_llms.mdx b/docs/snippets/get_started/quickstart/prompt_templates_llms.mdx deleted file mode 100644 index 208fbc805038..000000000000 --- a/docs/snippets/get_started/quickstart/prompt_templates_llms.mdx +++ /dev/null @@ -1,13 +0,0 @@ -```typescript -import { PromptTemplate } from "langchain/prompts"; - -const prompt = PromptTemplate.fromTemplate("What is a good name for a company that makes {product}?"); - -const formattedPrompt = await prompt.format({ - product: "colorful socks" -}); -``` - -```typescript -"What is a good name for a company that makes colorful socks?" -```