From 0b4a88a0cfce4550f6f5adf15e9b11741a8eb352 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Sat, 30 Dec 2023 17:58:19 -0500 Subject: [PATCH 01/59] WIP(backend/api): custom endpoint --- api/app/clients/OpenAIClient.js | 4 +- api/models/schema/defaults.js | 1 - api/server/middleware/buildEndpointOption.js | 1 + api/server/routes/ask/custom.js | 20 +++++++++ api/server/routes/ask/index.js | 2 + api/server/routes/edit/custom.js | 20 +++++++++ api/server/routes/edit/index.js | 2 + api/server/services/Config/EndpointService.js | 2 + .../services/Config/loadDefaultEConfig.js | 4 +- .../services/Config/loadDefaultModels.js | 3 ++ api/server/services/Endpoints/custom/index.js | 5 +++ .../Endpoints/custom/initializeClient.js | 44 +++++++++++++++++++ api/server/services/ModelService.js | 11 +++++ api/typedefs.js | 5 ++- api/utils/tokens.js | 31 +++++++------ 15 files changed, 135 insertions(+), 20 deletions(-) create mode 100644 api/server/routes/ask/custom.js create mode 100644 api/server/routes/edit/custom.js create mode 100644 api/server/services/Endpoints/custom/index.js create mode 100644 api/server/services/Endpoints/custom/initializeClient.js diff --git a/api/app/clients/OpenAIClient.js b/api/app/clients/OpenAIClient.js index f0dbc366bba..25287fa29b3 100644 --- a/api/app/clients/OpenAIClient.js +++ b/api/app/clients/OpenAIClient.js @@ -1,6 +1,6 @@ const OpenAI = require('openai'); const { HttpsProxyAgent } = require('https-proxy-agent'); -const { getResponseSender, EModelEndpoint } = require('librechat-data-provider'); +const { getResponseSender } = require('librechat-data-provider'); const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken'); const { encodeAndFormat, validateVisionModel } = require('~/server/services/Files/images'); const { getModelMaxTokens, genAzureChatCompletion, extractBaseURL } = require('~/utils'); @@ -146,7 +146,7 @@ class OpenAIClient extends BaseClient { this.options.sender ?? 
getResponseSender({ model: this.modelOptions.model, - endpoint: EModelEndpoint.openAI, + endpoint: this.options.endpoint, chatGptLabel: this.options.chatGptLabel, }); diff --git a/api/models/schema/defaults.js b/api/models/schema/defaults.js index 338ee120891..c3bcee5c739 100644 --- a/api/models/schema/defaults.js +++ b/api/models/schema/defaults.js @@ -95,7 +95,6 @@ const agentOptions = { // default: null, required: false, }, - // for google only modelLabel: { type: String, // default: null, diff --git a/api/server/middleware/buildEndpointOption.js b/api/server/middleware/buildEndpointOption.js index d98fe92d2ce..a72566aead9 100644 --- a/api/server/middleware/buildEndpointOption.js +++ b/api/server/middleware/buildEndpointOption.js @@ -8,6 +8,7 @@ const { parseConvo, EModelEndpoint } = require('librechat-data-provider'); const buildFunction = { [EModelEndpoint.openAI]: openAI.buildOptions, [EModelEndpoint.google]: google.buildOptions, + [EModelEndpoint.custom]: openAI.buildOptions, [EModelEndpoint.azureOpenAI]: openAI.buildOptions, [EModelEndpoint.anthropic]: anthropic.buildOptions, [EModelEndpoint.gptPlugins]: gptPlugins.buildOptions, diff --git a/api/server/routes/ask/custom.js b/api/server/routes/ask/custom.js new file mode 100644 index 00000000000..ef979bf0000 --- /dev/null +++ b/api/server/routes/ask/custom.js @@ -0,0 +1,20 @@ +const express = require('express'); +const AskController = require('~/server/controllers/AskController'); +const { initializeClient } = require('~/server/services/Endpoints/custom'); +const { addTitle } = require('~/server/services/Endpoints/openAI'); +const { + handleAbort, + setHeaders, + validateEndpoint, + buildEndpointOption, +} = require('~/server/middleware'); + +const router = express.Router(); + +router.post('/abort', handleAbort()); + +router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res, next) => { + await AskController(req, res, next, initializeClient, addTitle); +}); + +module.exports = router; diff --git a/api/server/routes/ask/index.js b/api/server/routes/ask/index.js index 669fd87e6fb..b5156ed8d10 100644 --- a/api/server/routes/ask/index.js +++ b/api/server/routes/ask/index.js @@ -1,5 +1,6 @@ const express = require('express'); const openAI = require('./openAI'); +const custom = require('./custom'); const google = require('./google'); const bingAI = require('./bingAI'); const anthropic = require('./anthropic'); @@ -42,5 +43,6 @@ router.use(`/${EModelEndpoint.gptPlugins}`, gptPlugins); router.use(`/${EModelEndpoint.anthropic}`, anthropic); router.use(`/${EModelEndpoint.google}`, google); router.use(`/${EModelEndpoint.bingAI}`, bingAI); +router.use(`/${EModelEndpoint.custom}`, custom); module.exports = router; diff --git a/api/server/routes/edit/custom.js b/api/server/routes/edit/custom.js new file mode 100644 index 00000000000..dd63c96c8f9 --- /dev/null +++ b/api/server/routes/edit/custom.js @@ -0,0 +1,20 @@ +const express = require('express'); +const EditController = require('~/server/controllers/EditController'); +const { initializeClient } = require('~/server/services/Endpoints/custom'); +const { addTitle } = require('~/server/services/Endpoints/openAI'); +const { + handleAbort, + setHeaders, + validateEndpoint, + buildEndpointOption, +} = require('~/server/middleware'); + +const router = express.Router(); + +router.post('/abort', handleAbort()); + +router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res, next) => { + await EditController(req, res, next, initializeClient, addTitle); 
+}); + +module.exports = router; diff --git a/api/server/routes/edit/index.js b/api/server/routes/edit/index.js index 01dd06ced98..fa19f9effdc 100644 --- a/api/server/routes/edit/index.js +++ b/api/server/routes/edit/index.js @@ -1,5 +1,6 @@ const express = require('express'); const openAI = require('./openAI'); +const custom = require('./custom'); const google = require('./google'); const anthropic = require('./anthropic'); const gptPlugins = require('./gptPlugins'); @@ -38,5 +39,6 @@ router.use([`/${EModelEndpoint.azureOpenAI}`, `/${EModelEndpoint.openAI}`], open router.use(`/${EModelEndpoint.gptPlugins}`, gptPlugins); router.use(`/${EModelEndpoint.anthropic}`, anthropic); router.use(`/${EModelEndpoint.google}`, google); +router.use(`/${EModelEndpoint.custom}`, custom); module.exports = router; diff --git a/api/server/services/Config/EndpointService.js b/api/server/services/Config/EndpointService.js index 998e7a83d03..b4cee9c77a8 100644 --- a/api/server/services/Config/EndpointService.js +++ b/api/server/services/Config/EndpointService.js @@ -4,6 +4,7 @@ const { OPENAI_API_KEY: openAIApiKey, AZURE_API_KEY: azureOpenAIApiKey, ANTHROPIC_API_KEY: anthropicApiKey, + CUSTOM_API_KEY: customApiKey, CHATGPT_TOKEN: chatGPTToken, BINGAI_TOKEN: bingToken, PLUGINS_USE_AZURE, @@ -33,5 +34,6 @@ module.exports = { [EModelEndpoint.chatGPTBrowser]: isUserProvided(chatGPTToken), [EModelEndpoint.anthropic]: isUserProvided(anthropicApiKey), [EModelEndpoint.bingAI]: isUserProvided(bingToken), + [EModelEndpoint.custom]: isUserProvided(customApiKey), }, }; diff --git a/api/server/services/Config/loadDefaultEConfig.js b/api/server/services/Config/loadDefaultEConfig.js index 34ab05d8ab8..620a3089b16 100644 --- a/api/server/services/Config/loadDefaultEConfig.js +++ b/api/server/services/Config/loadDefaultEConfig.js @@ -9,7 +9,7 @@ const { config } = require('./EndpointService'); */ async function loadDefaultEndpointsConfig() { const { google, gptPlugins } = await loadAsyncEndpoints(); - const { openAI, bingAI, anthropic, azureOpenAI, chatGPTBrowser } = config; + const { openAI, bingAI, anthropic, azureOpenAI, chatGPTBrowser, custom } = config; let enabledEndpoints = [ EModelEndpoint.openAI, @@ -19,6 +19,7 @@ async function loadDefaultEndpointsConfig() { EModelEndpoint.chatGPTBrowser, EModelEndpoint.gptPlugins, EModelEndpoint.anthropic, + EModelEndpoint.custom, ]; const endpointsEnv = process.env.ENDPOINTS || ''; @@ -37,6 +38,7 @@ async function loadDefaultEndpointsConfig() { [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser, [EModelEndpoint.gptPlugins]: gptPlugins, [EModelEndpoint.anthropic]: anthropic, + [EModelEndpoint.custom]: custom, }; const orderedAndFilteredEndpoints = enabledEndpoints.reduce((config, key, index) => { diff --git a/api/server/services/Config/loadDefaultModels.js b/api/server/services/Config/loadDefaultModels.js index 665aa714790..0e449d71ccf 100644 --- a/api/server/services/Config/loadDefaultModels.js +++ b/api/server/services/Config/loadDefaultModels.js @@ -5,6 +5,7 @@ const { getGoogleModels, getAnthropicModels, getChatGPTBrowserModels, + getCustomModels, } = require('~/server/services/ModelService'); const fitlerAssistantModels = (str) => { @@ -18,6 +19,7 @@ async function loadDefaultModels() { const chatGPTBrowser = getChatGPTBrowserModels(); const azureOpenAI = await getOpenAIModels({ azure: true }); const gptPlugins = await getOpenAIModels({ azure: useAzurePlugins, plugins: true }); + const custom = getCustomModels(); return { [EModelEndpoint.openAI]: openAI, @@ -28,6 +30,7 @@ async 
function loadDefaultModels() { [EModelEndpoint.bingAI]: ['BingAI', 'Sydney'], [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser, [EModelEndpoint.assistant]: openAI.filter(fitlerAssistantModels), + [EModelEndpoint.custom]: custom, }; } diff --git a/api/server/services/Endpoints/custom/index.js b/api/server/services/Endpoints/custom/index.js new file mode 100644 index 00000000000..9d78a35945e --- /dev/null +++ b/api/server/services/Endpoints/custom/index.js @@ -0,0 +1,5 @@ +const initializeClient = require('./initializeClient'); + +module.exports = { + initializeClient, +}; diff --git a/api/server/services/Endpoints/custom/initializeClient.js b/api/server/services/Endpoints/custom/initializeClient.js new file mode 100644 index 00000000000..6ef4ec12f21 --- /dev/null +++ b/api/server/services/Endpoints/custom/initializeClient.js @@ -0,0 +1,44 @@ +const { EModelEndpoint } = require('librechat-data-provider'); +const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService'); +const { isEnabled } = require('~/server/utils'); +const { OpenAIClient } = require('~/app'); + +const initializeClient = async ({ req, res, endpointOption }) => { + const { PROXY, CUSTOM_API_KEY, CUSTOM_BASE_URL, CUSTOM_SUMMARIZE } = process.env; + const { key: expiresAt, endpoint } = req.body; + const contextStrategy = isEnabled(CUSTOM_SUMMARIZE) ? 'summarize' : null; + const clientOptions = { + contextStrategy, + reverseProxyUrl: CUSTOM_BASE_URL ?? null, + proxy: PROXY ?? null, + req, + res, + ...endpointOption, + }; + + const credentials = { + [EModelEndpoint.custom]: CUSTOM_API_KEY, + }; + + const isUserProvided = credentials[endpoint] === 'user_provided'; + + let userKey = null; + if (expiresAt && isUserProvided) { + checkUserKeyExpiry(expiresAt, 'Your API key has expired. Please provide it again.'); + userKey = await getUserKey({ userId: req.user.id, name: endpoint }); + } + + let apiKey = isUserProvided ? userKey : credentials[endpoint]; + + if (!apiKey) { + throw new Error('API key not provided.'); + } + + const client = new OpenAIClient(apiKey, clientOptions); + return { + client, + openAIApiKey: apiKey, + }; +}; + +module.exports = initializeClient; diff --git a/api/server/services/ModelService.js b/api/server/services/ModelService.js index 08c9ae71d29..83c3a62aa30 100644 --- a/api/server/services/ModelService.js +++ b/api/server/services/ModelService.js @@ -21,6 +21,7 @@ const { CHATGPT_MODELS, ANTHROPIC_MODELS, GOOGLE_MODELS, + CUSTOM_MODELS, PROXY, } = process.env ?? {}; @@ -141,9 +142,19 @@ const getGoogleModels = () => { return models; }; +const getCustomModels = () => { + let models = defaultModels[EModelEndpoint.custom]; + if (CUSTOM_MODELS) { + models = String(CUSTOM_MODELS).split(','); + } + + return models; +}; + module.exports = { getOpenAIModels, getChatGPTBrowserModels, getAnthropicModels, getGoogleModels, + getCustomModels, }; diff --git a/api/typedefs.js b/api/typedefs.js index 1ab9f645718..d83042a39cb 100644 --- a/api/typedefs.js +++ b/api/typedefs.js @@ -280,8 +280,8 @@ * @property {boolean|{userProvide: boolean}} [chatGPTBrowser] - Flag to indicate if ChatGPT Browser endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean}} [anthropic] - Flag to indicate if Anthropic endpoint is user provided, or its configuration. * @property {boolean|{userProvide: boolean}} [bingAI] - Flag to indicate if BingAI endpoint is user provided, or its configuration. 
- * @property {boolean|{userProvide: boolean}} [bingAI] - Flag to indicate if BingAI endpoint is user provided, or its configuration.
- * @property {boolean|{userProvide: boolean}} [bingAI] - Flag to indicate if BingAI endpoint is user provided, or its configuration.
+ * @property {boolean|{userProvide: boolean}} [google] - Flag to indicate if Google endpoint is user provided, or its configuration.
+ * @property {boolean|{userProvide: boolean}} [custom] - Flag to indicate if Custom endpoint is user provided, or its configuration.
 * @memberof typedefs
 */

@@ -313,6 +313,7 @@
 * @property {boolean|{userProvide: boolean}} [anthropic] - Flag to indicate if Anthropic endpoint is user provided, or its configuration.
 * @property {boolean|{userProvide: boolean}} [bingAI] - Flag to indicate if BingAI endpoint is user provided, or its configuration.
 * @property {boolean|{userProvide: boolean}} [google] - Flag to indicate if Google endpoint is user provided, or its configuration.
+ * @property {boolean|{userProvide: boolean}} [custom] - Flag to indicate if Custom endpoint is user provided, or its configuration.
 * @property {boolean|GptPlugins} [gptPlugins] - Configuration for GPT plugins.
 * @memberof typedefs
 */

diff --git a/api/utils/tokens.js b/api/utils/tokens.js
index cda4755717d..45f794c70e4 100644
--- a/api/utils/tokens.js
+++ b/api/utils/tokens.js
@@ -39,22 +39,25 @@ const models = [
   'gpt-3.5-turbo-0301',
 ];
 
+const openAIModels = {
+  'gpt-4': 8191,
+  'gpt-4-0613': 8191,
+  'gpt-4-32k': 32767,
+  'gpt-4-32k-0314': 32767,
+  'gpt-4-32k-0613': 32767,
+  'gpt-3.5-turbo': 4095,
+  'gpt-3.5-turbo-0613': 4095,
+  'gpt-3.5-turbo-0301': 4095,
+  'gpt-3.5-turbo-16k': 15999,
+  'gpt-3.5-turbo-16k-0613': 15999,
+  'gpt-3.5-turbo-1106': 16380, // -5 from max
+  'gpt-4-1106': 127995, // -5 from max
+};
+
 // Order is important here: by model series and context size (gpt-4 then gpt-3, ascending)
 const maxTokensMap = {
-  [EModelEndpoint.openAI]: {
-    'gpt-4': 8191,
-    'gpt-4-0613': 8191,
-    'gpt-4-32k': 32767,
-    'gpt-4-32k-0314': 32767,
-    'gpt-4-32k-0613': 32767,
-    'gpt-3.5-turbo': 4095,
-    'gpt-3.5-turbo-0613': 4095,
-    'gpt-3.5-turbo-0301': 4095,
-    'gpt-3.5-turbo-16k': 15999,
-    'gpt-3.5-turbo-16k-0613': 15999,
-    'gpt-3.5-turbo-1106': 16380, // -5 from max
-    'gpt-4-1106': 127995, // -5 from max
-  },
+  [EModelEndpoint.openAI]: openAIModels,
+  [EModelEndpoint.custom]: openAIModels,
   [EModelEndpoint.google]: {
     /* Max I/O is combined so we subtract the amount from max response tokens for actual total */
     gemini: 32750, // -10 from max

From eaa140929ea7352e9f97865f6ca686ec506f5203 Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Sat, 30 Dec 2023 17:59:33 -0500
Subject: [PATCH 02/59] WIP(frontend/client): custom endpoint

---
 .../components/Chat/Menus/Endpoints/Icons.tsx |  2 +
 .../components/Chat/Messages/HoverButtons.tsx |  4 +-
 client/src/components/Endpoints/Icon.tsx      |  5 ++
 .../src/components/Endpoints/MinimalIcon.tsx  |  5 ++
 .../components/Endpoints/Settings/settings.ts |  9 ++--
 .../components/Input/ModelSelect/options.ts   |  1 +
 .../Input/SetKeyDialog/CustomConfig.tsx       | 46 +++++++++++++++++++
 .../Input/SetKeyDialog/SetKeyDialog.tsx       | 20 +++++++-
 .../src/components/svg/CustomMinimalIcon.tsx  | 30 ++++++++++++
 client/src/components/svg/index.ts            |  1 +
 client/src/hooks/useGenerationsByLatest.ts    |  2 +
 client/src/store/endpoints.ts                 |  2 +
 client/src/utils/presets.ts                   |  5 +-
 13 files changed, 124 insertions(+), 8 deletions(-)
 create mode 100644 client/src/components/Input/SetKeyDialog/CustomConfig.tsx
 create mode 100644
client/src/components/svg/CustomMinimalIcon.tsx diff --git a/client/src/components/Chat/Menus/Endpoints/Icons.tsx b/client/src/components/Chat/Menus/Endpoints/Icons.tsx index 56aed93f124..a1bffb420c2 100644 --- a/client/src/components/Chat/Menus/Endpoints/Icons.tsx +++ b/client/src/components/Chat/Menus/Endpoints/Icons.tsx @@ -6,6 +6,7 @@ import { AzureMinimalIcon, BingAIMinimalIcon, GoogleMinimalIcon, + CustomMinimalIcon, LightningIcon, } from '~/components/svg'; import { cn } from '~/utils'; @@ -18,6 +19,7 @@ export const icons = { [EModelEndpoint.chatGPTBrowser]: LightningIcon, [EModelEndpoint.google]: GoogleMinimalIcon, [EModelEndpoint.bingAI]: BingAIMinimalIcon, + [EModelEndpoint.custom]: CustomMinimalIcon, [EModelEndpoint.assistant]: ({ className = '' }) => ( = (props) => { : `rgba(0, 163, 255, ${button ? 0.75 : 1})`, name: 'ChatGPT', }, + [EModelEndpoint.custom]: { + icon: , + name: 'Custom', + }, null: { icon: , bg: 'grey', name: 'N/A' }, default: { icon: , bg: 'grey', name: 'UNKNOWN' }, }; diff --git a/client/src/components/Endpoints/MinimalIcon.tsx b/client/src/components/Endpoints/MinimalIcon.tsx index 5af2661fd67..ebf13457da4 100644 --- a/client/src/components/Endpoints/MinimalIcon.tsx +++ b/client/src/components/Endpoints/MinimalIcon.tsx @@ -6,6 +6,7 @@ import { PluginMinimalIcon, BingAIMinimalIcon, GoogleMinimalIcon, + CustomMinimalIcon, AnthropicIcon, } from '~/components/svg'; import { cn } from '~/utils'; @@ -32,6 +33,10 @@ const MinimalIcon: React.FC = (props) => { icon: , name: props.modelLabel || 'Claude', }, + [EModelEndpoint.custom]: { + icon: , + name: 'Custom', + }, [EModelEndpoint.bingAI]: { icon: , name: 'BingAI' }, [EModelEndpoint.chatGPTBrowser]: { icon: , name: 'ChatGPT' }, default: { icon: , name: 'UNKNOWN' }, diff --git a/client/src/components/Endpoints/Settings/settings.ts b/client/src/components/Endpoints/Settings/settings.ts index c80da25d2ca..9c4d302bc5e 100644 --- a/client/src/components/Endpoints/Settings/settings.ts +++ b/client/src/components/Endpoints/Settings/settings.ts @@ -1,13 +1,14 @@ import { EModelEndpoint } from 'librechat-data-provider'; -import OpenAISettings from './OpenAI'; -import BingAISettings from './BingAI'; -import AnthropicSettings from './Anthropic'; -import { Google, Plugins, GoogleSettings, PluginSettings } from './MultiView'; import type { FC } from 'react'; import type { TModelSelectProps, TBaseSettingsProps, TModels } from '~/common'; +import { Google, Plugins, GoogleSettings, PluginSettings } from './MultiView'; +import AnthropicSettings from './Anthropic'; +import BingAISettings from './BingAI'; +import OpenAISettings from './OpenAI'; const settings: { [key: string]: FC } = { [EModelEndpoint.openAI]: OpenAISettings, + [EModelEndpoint.custom]: OpenAISettings, [EModelEndpoint.azureOpenAI]: OpenAISettings, [EModelEndpoint.bingAI]: BingAISettings, [EModelEndpoint.anthropic]: AnthropicSettings, diff --git a/client/src/components/Input/ModelSelect/options.ts b/client/src/components/Input/ModelSelect/options.ts index 9863a4930e0..ce231e86371 100644 --- a/client/src/components/Input/ModelSelect/options.ts +++ b/client/src/components/Input/ModelSelect/options.ts @@ -12,6 +12,7 @@ import PluginsByIndex from './PluginsByIndex'; export const options: { [key: string]: FC } = { [EModelEndpoint.openAI]: OpenAI, + [EModelEndpoint.custom]: OpenAI, [EModelEndpoint.azureOpenAI]: OpenAI, [EModelEndpoint.bingAI]: BingAI, [EModelEndpoint.google]: Google, diff --git a/client/src/components/Input/SetKeyDialog/CustomConfig.tsx 
b/client/src/components/Input/SetKeyDialog/CustomConfig.tsx new file mode 100644 index 00000000000..4781fab8cc1 --- /dev/null +++ b/client/src/components/Input/SetKeyDialog/CustomConfig.tsx @@ -0,0 +1,46 @@ +// import * as Checkbox from '@radix-ui/react-checkbox'; +// import { CheckIcon } from '@radix-ui/react-icons'; +import { useFormContext, Controller } from 'react-hook-form'; +import InputWithLabel from './InputWithLabel'; + +const CustomConfig = () => { + const { control } = useFormContext(); + + return ( +
+    <div className="mt-2 grid gap-2">
+      <Controller
+        name="customEndpointName"
+        control={control}
+        render={({ field }) => (
+          <InputWithLabel id={field.name} value={field.value} onChange={field.onChange} label={'Endpoint Name'} />
+        )}
+      />
+      <Controller
+        name="customBaseURL"
+        control={control}
+        render={({ field }) => (
+          <InputWithLabel id={field.name} value={field.value} onChange={field.onChange} label={'Base URL'} />
+        )}
+      />
+      <Controller
+        name="customModels"
+        control={control}
+        render={({ field }) => (
+          <InputWithLabel id={field.name} value={field.value} onChange={field.onChange} label={'Models'} />
+        )}
+      />
+      <Controller
+        name="customApiKey"
+        control={control}
+        render={({ field }) => (
+          <InputWithLabel id={field.name} value={field.value} onChange={field.onChange} label={'API Key'} />
+        )}
+      />
+    </div>
+ ); +}; + +export default CustomConfig; diff --git a/client/src/components/Input/SetKeyDialog/SetKeyDialog.tsx b/client/src/components/Input/SetKeyDialog/SetKeyDialog.tsx index bfb2d09d37d..64087a78915 100644 --- a/client/src/components/Input/SetKeyDialog/SetKeyDialog.tsx +++ b/client/src/components/Input/SetKeyDialog/SetKeyDialog.tsx @@ -1,4 +1,5 @@ import React, { useState } from 'react'; +import { useForm, FormProvider } from 'react-hook-form'; import { EModelEndpoint, alternateName } from 'librechat-data-provider'; import type { TDialogProps } from '~/common'; import DialogTemplate from '~/components/ui/DialogTemplate'; @@ -7,12 +8,14 @@ import { Dialog, Dropdown } from '~/components/ui'; import { useUserKey, useLocalize } from '~/hooks'; import GoogleConfig from './GoogleConfig'; import OpenAIConfig from './OpenAIConfig'; +import CustomConfig from './CustomConfig'; import OtherConfig from './OtherConfig'; import HelpText from './HelpText'; const endpointComponents = { [EModelEndpoint.google]: GoogleConfig, [EModelEndpoint.openAI]: OpenAIConfig, + [EModelEndpoint.custom]: CustomConfig, [EModelEndpoint.azureOpenAI]: OpenAIConfig, [EModelEndpoint.gptPlugins]: OpenAIConfig, default: OtherConfig, @@ -34,6 +37,15 @@ const SetKeyDialog = ({ }: Pick & { endpoint: string; }) => { + const methods = useForm({ + defaultValues: { + customEndpointName: '', + customBaseURL: '', + customModels: '', + customApiKey: '', + }, + }); + const [userKey, setUserKey] = useState(''); const [expiresAtLabel, setExpiresAtLabel] = useState(EXPIRY.TWELVE_HOURS.display); const { getExpiry, saveUserKey } = useUserKey(endpoint); @@ -46,6 +58,10 @@ const SetKeyDialog = ({ }; const submit = () => { + if (endpoint === EModelEndpoint.custom) { + methods.handleSubmit((data) => console.log(data))(); + return; + } const selectedOption = expirationOptions.find((option) => option.display === expiresAtLabel); const expiresAt = Date.now() + (selectedOption ? 
selectedOption.value : 0); saveUserKey(userKey, expiresAt); @@ -77,7 +93,9 @@ const SetKeyDialog = ({ options={expirationOptions.map((option) => option.display)} width={185} /> - + + + } diff --git a/client/src/components/svg/CustomMinimalIcon.tsx b/client/src/components/svg/CustomMinimalIcon.tsx new file mode 100644 index 00000000000..196dd779c2f --- /dev/null +++ b/client/src/components/svg/CustomMinimalIcon.tsx @@ -0,0 +1,30 @@ +import { cn } from '~/utils'; +export default function CustomMinimalIcon({ + size = 25, + className = '', +}: { + size?: number; + className?: string; +}) { + return ( + + + + + + + + + ); +} diff --git a/client/src/components/svg/index.ts b/client/src/components/svg/index.ts index 3ad62c93eea..5421233eaab 100644 --- a/client/src/components/svg/index.ts +++ b/client/src/components/svg/index.ts @@ -8,6 +8,7 @@ export { default as Clipboard } from './Clipboard'; export { default as CheckMark } from './CheckMark'; export { default as CrossIcon } from './CrossIcon'; export { default as LogOutIcon } from './LogOutIcon'; +export { default as CustomMinimalIcon } from './CustomMinimalIcon'; export { default as LightningIcon } from './LightningIcon'; export { default as AttachmentIcon } from './AttachmentIcon'; export { default as MessagesSquared } from './MessagesSquared'; diff --git a/client/src/hooks/useGenerationsByLatest.ts b/client/src/hooks/useGenerationsByLatest.ts index acbb3baa337..4fe91ca096e 100644 --- a/client/src/hooks/useGenerationsByLatest.ts +++ b/client/src/hooks/useGenerationsByLatest.ts @@ -19,6 +19,7 @@ export default function useGenerationsByLatest({ const { error, messageId, searchResult, finish_reason, isCreatedByUser } = message ?? {}; const isEditableEndpoint = !![ EModelEndpoint.openAI, + EModelEndpoint.custom, EModelEndpoint.google, EModelEndpoint.assistant, EModelEndpoint.anthropic, @@ -39,6 +40,7 @@ export default function useGenerationsByLatest({ !![ EModelEndpoint.azureOpenAI, EModelEndpoint.openAI, + EModelEndpoint.custom, EModelEndpoint.chatGPTBrowser, EModelEndpoint.google, EModelEndpoint.bingAI, diff --git a/client/src/store/endpoints.ts b/client/src/store/endpoints.ts index e944ebb7a0d..b92fca701cd 100644 --- a/client/src/store/endpoints.ts +++ b/client/src/store/endpoints.ts @@ -11,6 +11,7 @@ const defaultConfig: TEndpointsConfig = { [EModelEndpoint.gptPlugins]: null, [EModelEndpoint.google]: null, [EModelEndpoint.anthropic]: null, + [EModelEndpoint.custom]: null, }; const endpointsConfig = atom({ @@ -55,6 +56,7 @@ const availableEndpoints = selector({ 'bingAI', 'google', 'anthropic', + 'custom', ]; const f = get(endpointsFilter); return endpoints.filter((endpoint) => f[endpoint]); diff --git a/client/src/utils/presets.ts b/client/src/utils/presets.ts index 512572526d6..ee86087d875 100644 --- a/client/src/utils/presets.ts +++ b/client/src/utils/presets.ts @@ -26,7 +26,10 @@ export const getPresetTitle = (preset: TPreset) => { let modelInfo = model || ''; let label = ''; - if (endpoint && [EModelEndpoint.azureOpenAI, EModelEndpoint.openAI].includes(endpoint)) { + if ( + endpoint && + [EModelEndpoint.azureOpenAI, EModelEndpoint.openAI, EModelEndpoint.custom].includes(endpoint) + ) { label = chatGptLabel || ''; } else if (endpoint && [EModelEndpoint.google, EModelEndpoint.anthropic].includes(endpoint)) { label = modelLabel || ''; From 04fa37636a0bdde5cacc54a27bcfcc6cd9933f03 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Sun, 31 Dec 2023 09:22:12 -0500 Subject: [PATCH 03/59] chore: adjust typedefs for configs --- api/typedefs.js | 12 
+++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/api/typedefs.js b/api/typedefs.js
index d83042a39cb..e96d7dba292 100644
--- a/api/typedefs.js
+++ b/api/typedefs.js
@@ -20,6 +20,12 @@
  * @memberof typedefs
  */
 
+/**
+ * @exports TConfig
+ * @typedef {import('librechat-data-provider').TConfig} TConfig
+ * @memberof typedefs
+ */
+
 /**
  * @exports ImageMetadata
  * @typedef {Object} ImageMetadata
@@ -281,7 +287,7 @@
 * @property {boolean|{userProvide: boolean}} [anthropic] - Flag to indicate if Anthropic endpoint is user provided, or its configuration.
 * @property {boolean|{userProvide: boolean}} [bingAI] - Flag to indicate if BingAI endpoint is user provided, or its configuration.
 * @property {boolean|{userProvide: boolean}} [google] - Flag to indicate if Google endpoint is user provided, or its configuration.
- * @property {boolean|{userProvide: boolean}} [custom] - Flag to indicate if Custom endpoint is user provided, or its configuration.
+ * @property {boolean|{userProvide: boolean, userProvideURL: boolean, name: string}} [custom] - Custom Endpoint configuration.
 * @memberof typedefs
 */

@@ -313,14 +319,14 @@
 * @property {boolean|{userProvide: boolean}} [anthropic] - Flag to indicate if Anthropic endpoint is user provided, or its configuration.
 * @property {boolean|{userProvide: boolean}} [bingAI] - Flag to indicate if BingAI endpoint is user provided, or its configuration.
 * @property {boolean|{userProvide: boolean}} [google] - Flag to indicate if Google endpoint is user provided, or its configuration.
- * @property {boolean|{userProvide: boolean}} [custom] - Flag to indicate if Custom endpoint is user provided, or its configuration.
+ * @property {boolean|{userProvide: boolean, userProvideURL: boolean, name: string}} [custom] - Custom Endpoint configuration.
 * @property {boolean|GptPlugins} [gptPlugins] - Configuration for GPT plugins.
 * @memberof typedefs
 */

 /**
 * @exports EndpointConfig
- * @typedef {boolean|{userProvide: boolean}|GptPlugins} EndpointConfig
+ * @typedef {boolean|TConfig} EndpointConfig
 * @memberof typedefs
 */

From 6fcfca0b4b44c0cb957a5ab7be141dbc04bf524c Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Sun, 31 Dec 2023 09:25:57 -0500
Subject: [PATCH 04/59] refactor: use data-provider for cache keys and rename
 enums and custom endpoint for better clarity and compatibility

---
 api/cache/getLogStores.js                    | 31 +++++-----
 api/common/enums.js                          | 17 ------
 api/server/controllers/EndpointController.js | 18 +++---
 api/server/controllers/ModelController.js    |  6 +-
 api/server/controllers/OverrideController.js |  8 +--
 api/server/controllers/PluginController.js   |  4 +-
 client/src/store/endpoints.ts                |  2 +-
 packages/data-provider/src/schemas.ts        | 62 +++++++++++++++++---
 8 files changed, 88 insertions(+), 60 deletions(-)
 delete mode 100644 api/common/enums.js

diff --git a/api/cache/getLogStores.js b/api/cache/getLogStores.js
index 77949dacd3c..016c7700009 100644
--- a/api/cache/getLogStores.js
+++ b/api/cache/getLogStores.js
@@ -1,9 +1,10 @@
 const Keyv = require('keyv');
-const keyvMongo = require('./keyvMongo');
-const keyvRedis = require('./keyvRedis');
-const { CacheKeys } = require('~/common/enums');
-const { math, isEnabled } = require('~/server/utils');
+const { CacheKeys } = require('librechat-data-provider');
 const { logFile, violationFile } = require('./keyvFiles');
+const { math, isEnabled } = require('~/server/utils');
+const keyvRedis = require('./keyvRedis');
+const keyvMongo = require('./keyvMongo');
+
 const { BAN_DURATION, USE_REDIS } = process.env ??
{}; const duration = math(BAN_DURATION, 7200000); @@ -20,10 +21,10 @@ const pending_req = isEnabled(USE_REDIS) const config = isEnabled(USE_REDIS) ? new Keyv({ store: keyvRedis }) - : new Keyv({ namespace: CacheKeys.CONFIG }); + : new Keyv({ namespace: CacheKeys.CONFIG_STORE }); const namespaces = { - config, + [CacheKeys.CONFIG_STORE]: config, pending_req, ban: new Keyv({ store: keyvMongo, namespace: 'bans', ttl: duration }), general: new Keyv({ store: logFile, namespace: 'violations' }), @@ -39,19 +40,15 @@ const namespaces = { * Returns the keyv cache specified by type. * If an invalid type is passed, an error will be thrown. * - * @module getLogStores - * @requires keyv - a simple key-value storage that allows you to easily switch out storage adapters. - * @requires keyvFiles - a module that includes the logFile and violationFile. - * - * @param {string} type - The type of violation, which can be 'concurrent', 'message_limit', 'registrations' or 'logins'. - * @returns {Keyv} - If a valid type is passed, returns an object containing the logs for violations of the specified type. - * @throws Will throw an error if an invalid violation type is passed. + * @param {string} key - The key for the namespace to access + * @returns {Keyv} - If a valid key is passed, returns an object containing the cache store of the specified key. + * @throws Will throw an error if an invalid key is passed. */ -const getLogStores = (type) => { - if (!type || !namespaces[type]) { - throw new Error(`Invalid store type: ${type}`); +const getLogStores = (key) => { + if (!key || !namespaces[key]) { + throw new Error(`Invalid store key: ${key}`); } - return namespaces[type]; + return namespaces[key]; }; module.exports = getLogStores; diff --git a/api/common/enums.js b/api/common/enums.js deleted file mode 100644 index 849ae43f59c..00000000000 --- a/api/common/enums.js +++ /dev/null @@ -1,17 +0,0 @@ -/** - * @typedef {Object} CacheKeys - * @property {'config'} CONFIG - Key for the config cache. - * @property {'plugins'} PLUGINS - Key for the plugins cache. - * @property {'modelsConfig'} MODELS_CONFIG - Key for the model config cache. - * @property {'defaultConfig'} DEFAULT_CONFIG - Key for the default config cache. - * @property {'overrideConfig'} OVERRIDE_CONFIG - Key for the override config cache. 
- */ -const CacheKeys = { - CONFIG: 'config', - PLUGINS: 'plugins', - MODELS_CONFIG: 'modelsConfig', - DEFAULT_CONFIG: 'defaultConfig', - OVERRIDE_CONFIG: 'overrideConfig', -}; - -module.exports = { CacheKeys }; diff --git a/api/server/controllers/EndpointController.js b/api/server/controllers/EndpointController.js index 0cc21f96ac3..20f27f7a6fa 100644 --- a/api/server/controllers/EndpointController.js +++ b/api/server/controllers/EndpointController.js @@ -1,17 +1,17 @@ -const { getLogStores } = require('~/cache'); -const { CacheKeys } = require('~/common/enums'); +const { CacheKeys } = require('librechat-data-provider'); const { loadDefaultEndpointsConfig } = require('~/server/services/Config'); +const { getLogStores } = require('~/cache'); async function endpointController(req, res) { - const cache = getLogStores(CacheKeys.CONFIG); - const config = await cache.get(CacheKeys.DEFAULT_CONFIG); - if (config) { - res.send(config); + const cache = getLogStores(CacheKeys.CONFIG_STORE); + const endpointConfig = await cache.get(CacheKeys.ENDPOINT_CONFIG); + if (endpointConfig) { + res.send(endpointConfig); return; } - const defaultConfig = await loadDefaultEndpointsConfig(); - await cache.set(CacheKeys.DEFAULT_CONFIG, defaultConfig); - res.send(JSON.stringify(defaultConfig)); + const endpointsConfig = await loadDefaultEndpointsConfig(); + await cache.set(CacheKeys.ENDPOINT_CONFIG, endpointsConfig); + res.send(JSON.stringify(endpointsConfig)); } module.exports = endpointController; diff --git a/api/server/controllers/ModelController.js b/api/server/controllers/ModelController.js index 61ca82ecf03..607ddd9f25c 100644 --- a/api/server/controllers/ModelController.js +++ b/api/server/controllers/ModelController.js @@ -1,9 +1,9 @@ -const { getLogStores } = require('~/cache'); -const { CacheKeys } = require('~/common/enums'); +const { CacheKeys } = require('librechat-data-provider'); const { loadDefaultModels } = require('~/server/services/Config'); +const { getLogStores } = require('~/cache'); async function modelController(req, res) { - const cache = getLogStores(CacheKeys.CONFIG); + const cache = getLogStores(CacheKeys.CONFIG_STORE); let modelConfig = await cache.get(CacheKeys.MODELS_CONFIG); if (modelConfig) { res.send(modelConfig); diff --git a/api/server/controllers/OverrideController.js b/api/server/controllers/OverrideController.js index 0abd27a7a24..677fb87bdcb 100644 --- a/api/server/controllers/OverrideController.js +++ b/api/server/controllers/OverrideController.js @@ -1,9 +1,9 @@ -const { getLogStores } = require('~/cache'); -const { CacheKeys } = require('~/common/enums'); +const { CacheKeys } = require('librechat-data-provider'); const { loadOverrideConfig } = require('~/server/services/Config'); +const { getLogStores } = require('~/cache'); async function overrideController(req, res) { - const cache = getLogStores(CacheKeys.CONFIG); + const cache = getLogStores(CacheKeys.CONFIG_STORE); let overrideConfig = await cache.get(CacheKeys.OVERRIDE_CONFIG); if (overrideConfig) { res.send(overrideConfig); @@ -15,7 +15,7 @@ async function overrideController(req, res) { overrideConfig = await loadOverrideConfig(); const { endpointsConfig, modelsConfig } = overrideConfig; if (endpointsConfig) { - await cache.set(CacheKeys.DEFAULT_CONFIG, endpointsConfig); + await cache.set(CacheKeys.ENDPOINT_CONFIG, endpointsConfig); } if (modelsConfig) { await cache.set(CacheKeys.MODELS_CONFIG, modelsConfig); diff --git a/api/server/controllers/PluginController.js b/api/server/controllers/PluginController.js index 
697a499796c..c37b36974e0 100644 --- a/api/server/controllers/PluginController.js +++ b/api/server/controllers/PluginController.js @@ -1,7 +1,7 @@ const path = require('path'); const { promises: fs } = require('fs'); +const { CacheKeys } = require('librechat-data-provider'); const { addOpenAPISpecs } = require('~/app/clients/tools/util/addOpenAPISpecs'); -const { CacheKeys } = require('~/common/enums'); const { getLogStores } = require('~/cache'); const filterUniquePlugins = (plugins) => { @@ -29,7 +29,7 @@ const isPluginAuthenticated = (plugin) => { const getAvailablePluginsController = async (req, res) => { try { - const cache = getLogStores(CacheKeys.CONFIG); + const cache = getLogStores(CacheKeys.CONFIG_STORE); const cachedPlugins = await cache.get(CacheKeys.PLUGINS); if (cachedPlugins) { res.status(200).json(cachedPlugins); diff --git a/client/src/store/endpoints.ts b/client/src/store/endpoints.ts index b92fca701cd..3adf2cb12ef 100644 --- a/client/src/store/endpoints.ts +++ b/client/src/store/endpoints.ts @@ -56,7 +56,7 @@ const availableEndpoints = selector({ 'bingAI', 'google', 'anthropic', - 'custom', + 'custom_endpoints', ]; const f = get(endpointsFilter); return endpoints.filter((endpoint) => f[endpoint]); diff --git a/packages/data-provider/src/schemas.ts b/packages/data-provider/src/schemas.ts index 4698ea6a556..d9d5fd770e7 100644 --- a/packages/data-provider/src/schemas.ts +++ b/packages/data-provider/src/schemas.ts @@ -9,6 +9,7 @@ export enum EModelEndpoint { gptPlugins = 'gptPlugins', anthropic = 'anthropic', assistant = 'assistant', + custom = 'custom_endpoints', } export const defaultEndpoints: EModelEndpoint[] = [ @@ -20,6 +21,7 @@ export const defaultEndpoints: EModelEndpoint[] = [ EModelEndpoint.gptPlugins, EModelEndpoint.google, EModelEndpoint.anthropic, + EModelEndpoint.custom, ]; export const defaultModels = { @@ -73,6 +75,7 @@ export const alternateName = { [EModelEndpoint.gptPlugins]: 'Plugins', [EModelEndpoint.google]: 'Google', [EModelEndpoint.anthropic]: 'Anthropic', + [EModelEndpoint.custom]: 'Custom', }; export enum AuthKeys { @@ -117,13 +120,14 @@ export const endpointSettings = { const google = endpointSettings[EModelEndpoint.google]; export const EndpointURLs: { [key in EModelEndpoint]: string } = { - [EModelEndpoint.azureOpenAI]: '/api/ask/azureOpenAI', - [EModelEndpoint.openAI]: '/api/ask/openAI', - [EModelEndpoint.bingAI]: '/api/ask/bingAI', - [EModelEndpoint.chatGPTBrowser]: '/api/ask/chatGPTBrowser', - [EModelEndpoint.google]: '/api/ask/google', - [EModelEndpoint.gptPlugins]: '/api/ask/gptPlugins', - [EModelEndpoint.anthropic]: '/api/ask/anthropic', + [EModelEndpoint.openAI]: `/api/ask/${EModelEndpoint.openAI}`, + [EModelEndpoint.bingAI]: `/api/ask/${EModelEndpoint.bingAI}`, + [EModelEndpoint.google]: `/api/ask/${EModelEndpoint.google}`, + [EModelEndpoint.custom]: `/api/ask/${EModelEndpoint.custom}`, + [EModelEndpoint.anthropic]: `/api/ask/${EModelEndpoint.anthropic}`, + [EModelEndpoint.gptPlugins]: `/api/ask/${EModelEndpoint.gptPlugins}`, + [EModelEndpoint.azureOpenAI]: `/api/ask/${EModelEndpoint.azureOpenAI}`, + [EModelEndpoint.chatGPTBrowser]: `/api/ask/${EModelEndpoint.chatGPTBrowser}`, [EModelEndpoint.assistant]: '/api/assistants/chat', }; @@ -132,6 +136,8 @@ export const modularEndpoints = new Set([ EModelEndpoint.anthropic, EModelEndpoint.google, EModelEndpoint.openAI, + EModelEndpoint.azureOpenAI, + EModelEndpoint.custom, ]); export const supportsFiles = { @@ -139,12 +145,14 @@ export const supportsFiles = { [EModelEndpoint.google]: true, 
[EModelEndpoint.assistant]: true, [EModelEndpoint.azureOpenAI]: true, + [EModelEndpoint.custom]: true, }; export const supportsBalanceCheck = { [EModelEndpoint.openAI]: true, [EModelEndpoint.azureOpenAI]: true, [EModelEndpoint.gptPlugins]: true, + [EModelEndpoint.custom]: true, }; export const visionModels = ['gpt-4-vision', 'llava-13b', 'gemini-pro-vision']; @@ -540,6 +548,7 @@ type EndpointSchema = const endpointSchemas: Record = { [EModelEndpoint.openAI]: openAISchema, [EModelEndpoint.azureOpenAI]: openAISchema, + [EModelEndpoint.custom]: openAISchema, [EModelEndpoint.google]: googleSchema, [EModelEndpoint.bingAI]: bingAISchema, [EModelEndpoint.anthropic]: anthropicSchema, @@ -641,6 +650,14 @@ export const getResponseSender = (endpointOption: TEndpointOption): string => { return 'PaLM2'; } + if (endpoint === EModelEndpoint.custom) { + if (modelLabel) { + return modelLabel; + } + + return 'AI'; + } + return ''; }; @@ -821,6 +838,7 @@ type CompactEndpointSchema = const compactEndpointSchemas: Record = { openAI: compactOpenAISchema, azureOpenAI: compactOpenAISchema, + custom: compactOpenAISchema, assistant: assistantSchema, google: compactGoogleSchema, /* BingAI needs all fields */ @@ -859,3 +877,33 @@ export const parseCompactConvo = ( return convo; }; + +/** + * Enum for cache keys. + */ +export enum CacheKeys { + /** + * Key for the config store namespace. + */ + CONFIG_STORE = 'configStore', + /** + * Key for the plugins cache. + */ + PLUGINS = 'plugins', + /** + * Key for the model config cache. + */ + MODELS_CONFIG = 'modelsConfig', + /** + * Key for the default endpoint config cache. + */ + ENDPOINT_CONFIG = 'endpointsConfig', + /** + * Key for the custom config cache. + */ + CUSTOM_CONFIG = 'customConfig', + /** + * Key for the override config cache. 
+   */
+  OVERRIDE_CONFIG = 'overrideConfig',
+}

From 98cda57adda21202d3e8561ed0107954ca42a6f0 Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Sun, 31 Dec 2023 09:42:34 -0500
Subject: [PATCH 05/59] feat: loadYaml utility

---
 api/utils/index.js    |  2 ++
 api/utils/loadYaml.js | 13 +++++++++++++
 2 files changed, 15 insertions(+)
 create mode 100644 api/utils/loadYaml.js

diff --git a/api/utils/index.js b/api/utils/index.js
index f9194858e82..a40c53b6aba 100644
--- a/api/utils/index.js
+++ b/api/utils/index.js
@@ -1,3 +1,4 @@
+const loadYaml = require('./loadYaml');
 const tokenHelpers = require('./tokens');
 const azureUtils = require('./azureUtils');
 const extractBaseURL = require('./extractBaseURL');
@@ -8,4 +9,5 @@ module.exports = {
   ...tokenHelpers,
   extractBaseURL,
   findMessageContent,
+  loadYaml,
 };

diff --git a/api/utils/loadYaml.js b/api/utils/loadYaml.js
new file mode 100644
index 00000000000..eec7e2ec6ce
--- /dev/null
+++ b/api/utils/loadYaml.js
@@ -0,0 +1,13 @@
+const fs = require('fs');
+const yaml = require('js-yaml');
+
+function loadYaml(filepath) {
+  try {
+    let fileContents = fs.readFileSync(filepath, 'utf8');
+    return yaml.load(fileContents);
+  } catch (e) {
+    console.error(e);
+  }
+}
+
+module.exports = loadYaml;

From 33df5eb096b7a3e0cfbcd4a3c5980e431bebb998 Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Sun, 31 Dec 2023 12:16:12 -0500
Subject: [PATCH 06/59] refactor: rename back to `custom` from
 `custom_endpoints` and proof-of-concept for creating schemas from
 user-defined defaults

---
 client/src/store/endpoints.ts         |  2 +-
 packages/data-provider/src/schemas.ts | 64 ++++++++++++++++++++++++++-
 2 files changed, 64 insertions(+), 2 deletions(-)

diff --git a/client/src/store/endpoints.ts b/client/src/store/endpoints.ts
index 3adf2cb12ef..b92fca701cd 100644
--- a/client/src/store/endpoints.ts
+++ b/client/src/store/endpoints.ts
@@ -56,7 +56,7 @@ const availableEndpoints = selector({
     'bingAI',
     'google',
     'anthropic',
-    'custom_endpoints',
+    'custom',
   ];
   const f = get(endpointsFilter);
   return endpoints.filter((endpoint) => f[endpoint]);
diff --git a/packages/data-provider/src/schemas.ts b/packages/data-provider/src/schemas.ts
index d9d5fd770e7..e1ad70657a0 100644
--- a/packages/data-provider/src/schemas.ts
+++ b/packages/data-provider/src/schemas.ts
@@ -9,7 +9,7 @@ export enum EModelEndpoint {
   gptPlugins = 'gptPlugins',
   anthropic = 'anthropic',
   assistant = 'assistant',
-  custom = 'custom_endpoints',
+  custom = 'custom',
 }

@@ -315,6 +315,8 @@ export const tPresetSchema = tConversationSchema
 
 export type TPreset = z.infer<typeof tPresetSchema>;
 
+type DefaultSchemaValues = Partial<typeof google>;
+
 export const openAISchema = tConversationSchema
   .pick({
     model: true,
@@ -392,6 +394,56 @@ export const googleSchema = tConversationSchema
     topK: google.topK.default,
   }));
 
+const createGoogleSchema = (customGoogle: DefaultSchemaValues) => {
+  const defaults = { ...google, ...customGoogle };
+  return tConversationSchema
+    .pick({
+      model: true,
+      modelLabel: true,
+      promptPrefix: true,
+      examples: true,
+      temperature: true,
+      maxOutputTokens: true,
+      topP: true,
+      topK: true,
+    })
+    .transform((obj) => {
+      const isGeminiPro = obj?.model?.toLowerCase()?.includes('gemini-pro');
+
+      const maxOutputTokensMax = isGeminiPro
+        ? defaults.maxOutputTokens.maxGeminiPro
+        : defaults.maxOutputTokens.max;
+      const maxOutputTokensDefault = isGeminiPro
+        ? defaults.maxOutputTokens.defaultGeminiPro
+        : defaults.maxOutputTokens.default;
+
+      let maxOutputTokens = obj.maxOutputTokens ??
maxOutputTokensDefault; + maxOutputTokens = Math.min(maxOutputTokens, maxOutputTokensMax); + + return { + ...obj, + model: obj.model ?? defaults.model.default, + modelLabel: obj.modelLabel ?? null, + promptPrefix: obj.promptPrefix ?? null, + examples: obj.examples ?? [{ input: { content: '' }, output: { content: '' } }], + temperature: obj.temperature ?? defaults.temperature.default, + maxOutputTokens, + topP: obj.topP ?? defaults.topP.default, + topK: obj.topK ?? defaults.topK.default, + }; + }) + .catch(() => ({ + model: defaults.model.default, + modelLabel: null, + promptPrefix: null, + examples: [{ input: { content: '' }, output: { content: '' } }], + temperature: defaults.temperature.default, + maxOutputTokens: defaults.maxOutputTokens.default, + topP: defaults.topP.default, + topK: defaults.topK.default, + })); +}; + export const bingAISchema = tConversationSchema .pick({ jailbreak: true, @@ -557,6 +609,10 @@ const endpointSchemas: Record = { [EModelEndpoint.assistant]: assistantSchema, }; +// const schemaCreators: Record EndpointSchema> = { +// [EModelEndpoint.google]: createGoogleSchema, +// }; + export function getFirstDefinedValue(possibleValues: string[]) { let returnValue; for (const value of possibleValues) { @@ -577,6 +633,8 @@ export const parseConvo = ( endpoint: EModelEndpoint, conversation: Partial, possibleValues?: TPossibleValues, + // TODO: POC for default schema + // defaultSchema?: Partial, ) => { const schema = endpointSchemas[endpoint]; @@ -584,6 +642,10 @@ export const parseConvo = ( throw new Error(`Unknown endpoint: ${endpoint}`); } + // if (defaultSchema && schemaCreators[endpoint]) { + // schema = schemaCreators[endpoint](defaultSchema); + // } + const convo = schema.parse(conversation) as TConversation; const { models, secondaryModels } = possibleValues ?? 
{}; From 01f10604b8031b993e3780ef3ffd0d48a00a4a6b Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Sun, 31 Dec 2023 12:17:43 -0500 Subject: [PATCH 07/59] refactor: remove custom endpoint from default endpointsConfig as it will be exclusively managed by yaml config --- api/server/services/Config/EndpointService.js | 2 -- api/server/services/Config/loadDefaultEConfig.js | 4 +--- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/api/server/services/Config/EndpointService.js b/api/server/services/Config/EndpointService.js index b4cee9c77a8..998e7a83d03 100644 --- a/api/server/services/Config/EndpointService.js +++ b/api/server/services/Config/EndpointService.js @@ -4,7 +4,6 @@ const { OPENAI_API_KEY: openAIApiKey, AZURE_API_KEY: azureOpenAIApiKey, ANTHROPIC_API_KEY: anthropicApiKey, - CUSTOM_API_KEY: customApiKey, CHATGPT_TOKEN: chatGPTToken, BINGAI_TOKEN: bingToken, PLUGINS_USE_AZURE, @@ -34,6 +33,5 @@ module.exports = { [EModelEndpoint.chatGPTBrowser]: isUserProvided(chatGPTToken), [EModelEndpoint.anthropic]: isUserProvided(anthropicApiKey), [EModelEndpoint.bingAI]: isUserProvided(bingToken), - [EModelEndpoint.custom]: isUserProvided(customApiKey), }, }; diff --git a/api/server/services/Config/loadDefaultEConfig.js b/api/server/services/Config/loadDefaultEConfig.js index 620a3089b16..34ab05d8ab8 100644 --- a/api/server/services/Config/loadDefaultEConfig.js +++ b/api/server/services/Config/loadDefaultEConfig.js @@ -9,7 +9,7 @@ const { config } = require('./EndpointService'); */ async function loadDefaultEndpointsConfig() { const { google, gptPlugins } = await loadAsyncEndpoints(); - const { openAI, bingAI, anthropic, azureOpenAI, chatGPTBrowser, custom } = config; + const { openAI, bingAI, anthropic, azureOpenAI, chatGPTBrowser } = config; let enabledEndpoints = [ EModelEndpoint.openAI, @@ -19,7 +19,6 @@ async function loadDefaultEndpointsConfig() { EModelEndpoint.chatGPTBrowser, EModelEndpoint.gptPlugins, EModelEndpoint.anthropic, - EModelEndpoint.custom, ]; const endpointsEnv = process.env.ENDPOINTS || ''; @@ -38,7 +37,6 @@ async function loadDefaultEndpointsConfig() { [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser, [EModelEndpoint.gptPlugins]: gptPlugins, [EModelEndpoint.anthropic]: anthropic, - [EModelEndpoint.custom]: custom, }; const orderedAndFilteredEndpoints = enabledEndpoints.reduce((config, key, index) => { From 3b8c0380802275e67a58e17915e81f67571f5808 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Sun, 31 Dec 2023 12:18:48 -0500 Subject: [PATCH 08/59] refactor(EndpointController): rename variables for clarity --- api/server/controllers/EndpointController.js | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/api/server/controllers/EndpointController.js b/api/server/controllers/EndpointController.js index 20f27f7a6fa..3b7c29a081d 100644 --- a/api/server/controllers/EndpointController.js +++ b/api/server/controllers/EndpointController.js @@ -4,11 +4,12 @@ const { getLogStores } = require('~/cache'); async function endpointController(req, res) { const cache = getLogStores(CacheKeys.CONFIG_STORE); - const endpointConfig = await cache.get(CacheKeys.ENDPOINT_CONFIG); - if (endpointConfig) { - res.send(endpointConfig); + const cachedEndpointsConfig = await cache.get(CacheKeys.ENDPOINT_CONFIG); + if (cachedEndpointsConfig) { + res.send(cachedEndpointsConfig); return; } + const endpointsConfig = await loadDefaultEndpointsConfig(); await cache.set(CacheKeys.ENDPOINT_CONFIG, endpointsConfig); res.send(JSON.stringify(endpointsConfig)); From 
3e5cb1bd7d792d28bbbffbc0c3731bc7ef8322fa Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Sun, 31 Dec 2023 12:20:12 -0500 Subject: [PATCH 09/59] feat: initial load custom config --- .../services/Config/loadCustomConfig.js | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 api/server/services/Config/loadCustomConfig.js diff --git a/api/server/services/Config/loadCustomConfig.js b/api/server/services/Config/loadCustomConfig.js new file mode 100644 index 00000000000..c18be7e2575 --- /dev/null +++ b/api/server/services/Config/loadCustomConfig.js @@ -0,0 +1,26 @@ +const path = require('path'); +const { CacheKeys } = require('librechat-data-provider'); +const loadYaml = require('~/utils/loadYaml'); +const { getLogStores } = require('~/cache'); + +const apiRoot = path.resolve(__dirname, '..', '..', '..'); +const configPath = path.resolve(apiRoot, 'data', 'custom-config.yaml'); + +/** + * Load custom endpoints and caches the configuration object + * @function loadCustomConfig */ +async function loadCustomConfig() { + const customConfig = loadYaml(configPath); + if (!customConfig) { + return null; + } + + const cache = getLogStores(CacheKeys.CONFIG_STORE); + await cache.set(CacheKeys.CUSTOM_CONFIG, customConfig); + + // TODO: handle remote config + + // return customConfig; +} + +module.exports = loadCustomConfig; From 135debf51e0a2491de219283f648f1f531f43abc Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Sun, 31 Dec 2023 14:01:55 -0500 Subject: [PATCH 10/59] feat(server/utils): add simple `isUserProvided` helper --- api/server/utils/handleText.js | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/api/server/utils/handleText.js b/api/server/utils/handleText.js index 4cd1b7ce994..a1eece0e6e6 100644 --- a/api/server/utils/handleText.js +++ b/api/server/utils/handleText.js @@ -165,6 +165,14 @@ function isEnabled(value) { return false; } +/** + * Checks if the provided value is 'user_provided'. + * + * @param {string} value - The value to check. + * @returns {boolean} - Returns true if the value is 'user_provided', otherwise false. 
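+ * @example
+ * // Illustrative usage, assuming e.g. OPENAI_API_KEY=user_provided in the .env file:
+ * isUserProvided(process.env.OPENAI_API_KEY); // => true
+ * isUserProvided('my-actual-api-key'); // => false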
+ */ +const isUserProvided = (value) => value === 'user_provided'; + module.exports = { createOnProgress, isEnabled, @@ -172,4 +180,5 @@ module.exports = { formatSteps, formatAction, addSpaceIfNeeded, + isUserProvided, }; From b3cc02c9808dfcca4c328d366f5def51b9e3c03e Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Sun, 31 Dec 2023 14:29:00 -0500 Subject: [PATCH 11/59] chore(types): update TConfig type --- packages/data-provider/src/types.ts | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/packages/data-provider/src/types.ts b/packages/data-provider/src/types.ts index 0e66da603e8..2f8d0d58b0c 100644 --- a/packages/data-provider/src/types.ts +++ b/packages/data-provider/src/types.ts @@ -1,5 +1,11 @@ import OpenAI from 'openai'; -import type { TResPlugin, TMessage, TConversation, TEndpointOption } from './schemas'; +import type { + TResPlugin, + TMessage, + TConversation, + TEndpointOption, + EModelEndpoint, +} from './schemas'; export type TOpenAIMessage = OpenAI.Chat.ChatCompletionMessageParam; export type TOpenAIFunction = OpenAI.Chat.ChatCompletionCreateParams.Function; @@ -115,7 +121,10 @@ export type TSearchResults = { export type TConfig = { availableModels?: []; + name?: string; userProvide?: boolean | null; + userProvideURL?: boolean | null; + type?: EModelEndpoint; availableTools?: []; plugins?: Record; azure?: boolean; From 637d57c75a288adde1d592f1ce6db19b44df2edc Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Sun, 31 Dec 2023 14:30:52 -0500 Subject: [PATCH 12/59] refactor: remove custom endpoint handling from model services as will be handled by config, modularize fetching of models --- .../services/Config/loadDefaultModels.js | 3 - api/server/services/ModelService.js | 84 +++++++++++-------- 2 files changed, 50 insertions(+), 37 deletions(-) diff --git a/api/server/services/Config/loadDefaultModels.js b/api/server/services/Config/loadDefaultModels.js index 0e449d71ccf..665aa714790 100644 --- a/api/server/services/Config/loadDefaultModels.js +++ b/api/server/services/Config/loadDefaultModels.js @@ -5,7 +5,6 @@ const { getGoogleModels, getAnthropicModels, getChatGPTBrowserModels, - getCustomModels, } = require('~/server/services/ModelService'); const fitlerAssistantModels = (str) => { @@ -19,7 +18,6 @@ async function loadDefaultModels() { const chatGPTBrowser = getChatGPTBrowserModels(); const azureOpenAI = await getOpenAIModels({ azure: true }); const gptPlugins = await getOpenAIModels({ azure: useAzurePlugins, plugins: true }); - const custom = getCustomModels(); return { [EModelEndpoint.openAI]: openAI, @@ -30,7 +28,6 @@ async function loadDefaultModels() { [EModelEndpoint.bingAI]: ['BingAI', 'Sydney'], [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser, [EModelEndpoint.assistant]: openAI.filter(fitlerAssistantModels), - [EModelEndpoint.custom]: custom, }; } diff --git a/api/server/services/ModelService.js b/api/server/services/ModelService.js index 83c3a62aa30..2e433dbd14e 100644 --- a/api/server/services/ModelService.js +++ b/api/server/services/ModelService.js @@ -21,19 +21,56 @@ const { CHATGPT_MODELS, ANTHROPIC_MODELS, GOOGLE_MODELS, - CUSTOM_MODELS, PROXY, } = process.env ?? {}; +/** + * Fetches OpenAI models from the specified base API path or Azure, based on the provided configuration. + * + * @param {Object} params - The parameters for fetching the models. + * @param {string} params.apiKey - The API key for authentication with the API. + * @param {string} params.baseURL - The base path URL for the API. 
+ * @param {string} [params.name='OpenAI'] - The name of the API; defaults to 'OpenAI'. + * @param {boolean} [params.azure=false] - Whether to fetch models from Azure. + * @returns {Promise} A promise that resolves to an array of model identifiers. + * @async + */ +const fetchModels = async ({ apiKey, baseURL, name = 'OpenAI', azure = false }) => { + let models = []; + + if (!baseURL && !azure) { + return models; + } + + try { + const payload = { + headers: { + Authorization: `Bearer ${apiKey}`, + }, + }; + + if (PROXY) { + payload.httpsAgent = new HttpsProxyAgent(PROXY); + } + + const res = await axios.get(`${baseURL}${azure ? '' : '/models'}`, payload); + models = res.data.data.map((item) => item.id); + } catch (err) { + logger.error(`Failed to fetch models from ${azure ? 'Azure ' : ''}${name} API`, err); + } + + return models; +}; + const fetchOpenAIModels = async (opts = { azure: false, plugins: false }, _models = []) => { let models = _models.slice() ?? []; let apiKey = openAIApiKey; - let basePath = 'https://api.openai.com/v1'; + let baseURL = 'https://api.openai.com/v1'; let reverseProxyUrl = OPENAI_REVERSE_PROXY; if (opts.azure) { return models; // const azure = getAzureCredentials(); - // basePath = (genAzureChatCompletion(azure)) + // baseURL = (genAzureChatCompletion(azure)) // .split('/deployments')[0] // .concat(`/models?api-version=${azure.azureOpenAIApiVersion}`); // apiKey = azureOpenAIApiKey; @@ -43,32 +80,20 @@ const fetchOpenAIModels = async (opts = { azure: false, plugins: false }, _model } if (reverseProxyUrl) { - basePath = extractBaseURL(reverseProxyUrl); + baseURL = extractBaseURL(reverseProxyUrl); } - const cachedModels = await modelsCache.get(basePath); + const cachedModels = await modelsCache.get(baseURL); if (cachedModels) { return cachedModels; } - if (basePath || opts.azure) { - try { - const payload = { - headers: { - Authorization: `Bearer ${apiKey}`, - }, - }; - - if (PROXY) { - payload.httpsAgent = new HttpsProxyAgent(PROXY); - } - const res = await axios.get(`${basePath}${opts.azure ? '' : '/models'}`, payload); - - models = res.data.data.map((item) => item.id); - // logger.debug(`Fetched ${models.length} models from ${opts.azure ? 'Azure ' : ''}OpenAI API`); - } catch (err) { - logger.error(`Failed to fetch models from ${opts.azure ? 
'Azure ' : ''}OpenAI API`, err); - } + if (baseURL || opts.azure) { + models = await fetchModels({ + apiKey, + baseURL, + azure: opts.azure, + }); } if (!reverseProxyUrl) { @@ -76,7 +101,7 @@ const fetchOpenAIModels = async (opts = { azure: false, plugins: false }, _model models = models.filter((model) => regex.test(model)); } - await modelsCache.set(basePath, models); + await modelsCache.set(baseURL, models); return models; }; @@ -142,19 +167,10 @@ const getGoogleModels = () => { return models; }; -const getCustomModels = () => { - let models = defaultModels[EModelEndpoint.custom]; - if (CUSTOM_MODELS) { - models = String(CUSTOM_MODELS).split(','); - } - - return models; -}; - module.exports = { + fetchModels, getOpenAIModels, getChatGPTBrowserModels, getAnthropicModels, getGoogleModels, - getCustomModels, }; From 73ded7b3f05a2f8a14051619f3a7880b309b0e6d Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Sun, 31 Dec 2023 14:31:59 -0500 Subject: [PATCH 13/59] feat: loadCustomConfig, loadConfigEndpoints, loadConfigModels --- api/server/services/Config/index.js | 6 ++ .../services/Config/loadConfigEndpoints.js | 42 ++++++++++++ .../services/Config/loadConfigModels.js | 64 +++++++++++++++++++ .../services/Config/loadCustomConfig.js | 15 +++-- 4 files changed, 122 insertions(+), 5 deletions(-) create mode 100644 api/server/services/Config/loadConfigEndpoints.js create mode 100644 api/server/services/Config/loadConfigModels.js diff --git a/api/server/services/Config/index.js b/api/server/services/Config/index.js index 13cbc09f3b3..57a00bf515e 100644 --- a/api/server/services/Config/index.js +++ b/api/server/services/Config/index.js @@ -1,13 +1,19 @@ const { config } = require('./EndpointService'); +const loadCustomConfig = require('./loadCustomConfig'); +const loadConfigModels = require('./loadConfigModels'); const loadDefaultModels = require('./loadDefaultModels'); const loadOverrideConfig = require('./loadOverrideConfig'); const loadAsyncEndpoints = require('./loadAsyncEndpoints'); +const loadConfigEndpoints = require('./loadConfigEndpoints'); const loadDefaultEndpointsConfig = require('./loadDefaultEConfig'); module.exports = { config, + loadCustomConfig, + loadConfigModels, loadDefaultModels, loadOverrideConfig, loadAsyncEndpoints, + loadConfigEndpoints, loadDefaultEndpointsConfig, }; diff --git a/api/server/services/Config/loadConfigEndpoints.js b/api/server/services/Config/loadConfigEndpoints.js new file mode 100644 index 00000000000..8c262ee7e96 --- /dev/null +++ b/api/server/services/Config/loadConfigEndpoints.js @@ -0,0 +1,42 @@ +const { CacheKeys, EModelEndpoint } = require('librechat-data-provider'); +const loadCustomConfig = require('./loadCustomConfig'); +const { isUserProvided } = require('~/server/utils'); +const { getLogStores } = require('~/cache'); + +/** + * Load config endpoints from the cached configuration object + * @function loadConfigEndpoints */ +async function loadConfigEndpoints() { + const cache = getLogStores(CacheKeys.CONFIG_STORE); + let customConfig = await cache.get(CacheKeys.CUSTOM_CONFIG); + + if (!customConfig) { + customConfig = await loadCustomConfig(); + } + + if (!customConfig) { + return {}; + } + + const { endpoints = {} } = customConfig ?? 
{};
+
+  const customEndpoints = endpoints[EModelEndpoint.custom];
+  if (Array.isArray(customEndpoints)) {
+    endpoints[EModelEndpoint.custom] = customEndpoints
+      .filter((endpoint) => endpoint.baseURL && endpoint.apiKey && endpoint.name && endpoint.models)
+      .map((endpoint) => {
+        const { baseURL, apiKey, name } = endpoint;
+        return {
+          [name]: {
+            type: EModelEndpoint.custom,
+            userProvide: isUserProvided(apiKey),
+            userProvideURL: isUserProvided(baseURL),
+          },
+        };
+      });
+  }
+
+  return endpoints;
+}
+
+module.exports = loadConfigEndpoints;
diff --git a/api/server/services/Config/loadConfigModels.js b/api/server/services/Config/loadConfigModels.js
new file mode 100644
index 00000000000..e0d2d89d12f
--- /dev/null
+++ b/api/server/services/Config/loadConfigModels.js
@@ -0,0 +1,64 @@
+const { CacheKeys, EModelEndpoint } = require('librechat-data-provider');
+const { fetchModels } = require('~/server/services/ModelService');
+const loadCustomConfig = require('./loadCustomConfig');
+// const { isUserProvided } = require('~/server/utils');
+const { getLogStores } = require('~/cache');
+
+/**
+ * Load config models from the cached configuration object
+ * @function loadConfigModels */
+async function loadConfigModels() {
+  const cache = getLogStores(CacheKeys.CONFIG_STORE);
+  let customConfig = await cache.get(CacheKeys.CUSTOM_CONFIG);
+
+  if (!customConfig) {
+    customConfig = await loadCustomConfig();
+  }
+
+  if (!customConfig) {
+    return {};
+  }
+
+  const { endpoints = {} } = customConfig ?? {};
+
+  let customEndpoints = endpoints[EModelEndpoint.custom];
+  const configModels = {
+    [EModelEndpoint.custom]: [],
+  };
+
+  if (Array.isArray(customEndpoints)) {
+    customEndpoints = customEndpoints.filter(
+      (endpoint) =>
+        endpoint.baseURL &&
+        endpoint.apiKey &&
+        endpoint.name &&
+        endpoint.models &&
+        (endpoint.models.fetch || endpoint.models.default),
+    );
+
+    for (let i = 0; i < customEndpoints.length; i++) {
+      const endpoint = customEndpoints[i];
+      const { models, name, baseURL, apiKey } = endpoint;
+      if (models.fetch) {
+        const customEndpoint = {
+          [name]: await fetchModels({
+            baseURL,
+            apiKey,
+          }),
+        };
+
+        configModels[EModelEndpoint.custom].push(customEndpoint);
+
+        continue;
+      }
+
+      if (models.default) {
+        configModels[EModelEndpoint.custom].push({ [name]: models.default });
+      }
+    }
+  }
+
+  return configModels;
+}
+
+module.exports = loadConfigModels;
diff --git a/api/server/services/Config/loadCustomConfig.js b/api/server/services/Config/loadCustomConfig.js
index c18be7e2575..cd31c57e549 100644
--- a/api/server/services/Config/loadCustomConfig.js
+++ b/api/server/services/Config/loadCustomConfig.js
@@ -7,20 +7,25 @@
 const apiRoot = path.resolve(__dirname, '..', '..', '..');
 const configPath = path.resolve(apiRoot, 'data', 'custom-config.yaml');
 
 /**
- * Load custom endpoints and caches the configuration object
- * @function loadCustomConfig */
+ * Loads the custom configuration file and caches the object if the `cache` field at root is true.
+ * @function loadCustomConfig
+ * @returns {Promise<Object | null>} A promise that resolves to null or the custom config object.
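+ * @example
+ * // A sketch of the custom-config.yaml shape these config loaders consume;
+ * // field names follow the filters in loadConfigEndpoints/loadConfigModels,
+ * // and the endpoint name, key, and URL below are illustrative only:
+ * //   cache: true
+ * //   endpoints:
+ * //     custom:
+ * //       - name: 'Mistral'
+ * //         apiKey: 'user_provided'
+ * //         baseURL: 'https://api.mistral.ai/v1'
+ * //         models:
+ * //           fetch: true
+ * //           default: ['mistral-tiny', 'mistral-small']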
+ * */ + async function loadCustomConfig() { const customConfig = loadYaml(configPath); if (!customConfig) { return null; } - const cache = getLogStores(CacheKeys.CONFIG_STORE); - await cache.set(CacheKeys.CUSTOM_CONFIG, customConfig); + if (customConfig.cache) { + const cache = getLogStores(CacheKeys.CONFIG_STORE); + await cache.set(CacheKeys.CUSTOM_CONFIG, customConfig); + } // TODO: handle remote config - // return customConfig; + return customConfig; } module.exports = loadCustomConfig; From 425c8d82cd2ba843508bf938fec86303838da186 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Sun, 31 Dec 2023 14:32:36 -0500 Subject: [PATCH 14/59] chore: reorganize server init imports, invoke loadCustomConfig --- api/server/index.js | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/api/server/index.js b/api/server/index.js index 698620c56f3..c3586e481e2 100644 --- a/api/server/index.js +++ b/api/server/index.js @@ -5,14 +5,15 @@ const express = require('express'); const passport = require('passport'); const mongoSanitize = require('express-mongo-sanitize'); const { initializeFirebase } = require('~/server/services/Files/Firebase/initialize'); -const errorController = require('./controllers/ErrorController'); -const configureSocialLogins = require('./socialLogins'); +const loadCustomConfig = require('~/server/services/Config/loadCustomConfig'); +const errorController = require('~/server/controllers/ErrorController'); +const configureSocialLogins = require('~/server/socialLogins'); +const noIndex = require('~/server/middleware/noIndex'); const { connectDb, indexSync } = require('~/lib/db'); const { logger } = require('~/config'); -const noIndex = require('./middleware/noIndex'); +const routes = require('~/server/routes'); const paths = require('~/config/paths'); -const routes = require('./routes'); const { PORT, HOST, ALLOW_SOCIAL_LOGIN } = process.env ?? {}; @@ -24,6 +25,7 @@ const { jwtLogin, passportLogin } = require('~/strategies'); const startServer = async () => { await connectDb(); logger.info('Connected to MongoDB'); + await loadCustomConfig(); initializeFirebase(); await indexSync(); From e608f6db304e89d6fc4d0f2e6cb141e9acb34a0f Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Sun, 31 Dec 2023 14:53:53 -0500 Subject: [PATCH 15/59] refactor(loadConfigEndpoints/Models): return each custom endpoint as standalone endpoint --- .../services/Config/loadConfigEndpoints.js | 36 +++++++++++-------- .../services/Config/loadConfigModels.js | 34 ++++++++---------- 2 files changed, 36 insertions(+), 34 deletions(-) diff --git a/api/server/services/Config/loadConfigEndpoints.js b/api/server/services/Config/loadConfigEndpoints.js index 8c262ee7e96..b91947ba266 100644 --- a/api/server/services/Config/loadConfigEndpoints.js +++ b/api/server/services/Config/loadConfigEndpoints.js @@ -19,24 +19,30 @@ async function loadConfigEndpoints() { } const { endpoints = {} } = customConfig ?? 
{}; + const endpointsConfig = {}; - const customEndpoints = endpoints[EModelEndpoint.custom]; - if (Array.isArray(customEndpoints)) { - endpoints[EModelEndpoint.custom] = customEndpoints - .filter((endpoint) => endpoint.baseURL && endpoint.apiKey && endpoint.name && endpoint.models) - .map((endpoint) => { - const { baseURL, apiKey, name } = endpoint; - return { - [name]: { - type: EModelEndpoint.custom, - userProvide: isUserProvided(apiKey), - userProvideURL: isUserProvided(baseURL), - }, - }; - }); + if (Array.isArray(endpoints[EModelEndpoint.custom])) { + const customEndpoints = endpoints[EModelEndpoint.custom].filter( + (endpoint) => + endpoint.baseURL && + endpoint.apiKey && + endpoint.name && + endpoint.models && + (endpoint.models.fetch || endpoint.models.default), + ); + + for (let i = 0; i < customEndpoints.length; i++) { + const endpoint = customEndpoints[i]; + const { baseURL, apiKey, name } = endpoint; + endpointsConfig[name] = { + type: EModelEndpoint.custom, + userProvide: isUserProvided(apiKey), + userProvideURL: isUserProvided(baseURL), + }; + } } - return endpoints; + return endpointsConfig; } module.exports = loadConfigEndpoints; diff --git a/api/server/services/Config/loadConfigModels.js b/api/server/services/Config/loadConfigModels.js index e0d2d89d12f..e013c5605bd 100644 --- a/api/server/services/Config/loadConfigModels.js +++ b/api/server/services/Config/loadConfigModels.js @@ -1,7 +1,7 @@ const { CacheKeys, EModelEndpoint } = require('librechat-data-provider'); const { fetchModels } = require('~/server/services/ModelService'); const loadCustomConfig = require('./loadCustomConfig'); -// const { isUserProvided } = require('~/server/utils'); +const { isUserProvided } = require('~/server/utils'); const { getLogStores } = require('~/cache'); /** @@ -20,14 +20,10 @@ async function loadConfigModels() { } const { endpoints = {} } = customConfig ?? 
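/*
 * For a config like the YAML sketched in loadCustomConfig's @example, the
 * refactored loadConfigEndpoints above would return something like
 * (illustrative values only):
 *   { Mistral: { type: 'custom', userProvide: true, userProvideURL: false } }
 * i.e. each user-defined name becomes its own standalone endpoint entry.
 */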
{}; + const modelsConfig = {}; - let customEndpoints = endpoints[EModelEndpoint.custom]; - const configModels = { - [EModelEndpoint.custom]: [], - }; - - if (Array.isArray(customEndpoints)) { - customEndpoints = customEndpoints.filter( + if (Array.isArray(endpoints[EModelEndpoint.custom])) { + const customEndpoints = endpoints[EModelEndpoint.custom].filter( (endpoint) => endpoint.baseURL && endpoint.apiKey && @@ -39,26 +35,26 @@ async function loadConfigModels() { for (let i = 0; i < customEndpoints.length; i++) { const endpoint = customEndpoints[i]; const { models, name, baseURL, apiKey } = endpoint; - if (models.fetch) { - const customEndpoint = { - [name]: await fetchModels({ - baseURL, - apiKey, - }), - }; - configModels[EModelEndpoint.custom].push(customEndpoint); + modelsConfig[name] = []; + // TODO: allow fetching with user provided api key and base url + const shouldFetch = models.fetch && !isUserProvided(apiKey) && !isUserProvided(baseURL); + if (shouldFetch) { + modelsConfig[name] = await fetchModels({ + baseURL, + apiKey, + }); continue; } - if (models.default) { - configModels[EModelEndpoint.custom].push({ [name]: models.default }); + if (Array.isArray(models.default)) { + modelsConfig[name] = models.default; } } } - return configModels; + return modelsConfig; } module.exports = loadConfigModels; From 62bb326a3670c44156ac769368f6544fbd25a714 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Sun, 31 Dec 2023 14:54:38 -0500 Subject: [PATCH 16/59] refactor(Endpoint/ModelController): spread config values after default (temporary) --- api/server/controllers/EndpointController.js | 8 ++++++-- api/server/controllers/ModelController.js | 14 +++++++++----- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/api/server/controllers/EndpointController.js b/api/server/controllers/EndpointController.js index 3b7c29a081d..5069bb33e0b 100644 --- a/api/server/controllers/EndpointController.js +++ b/api/server/controllers/EndpointController.js @@ -1,5 +1,5 @@ const { CacheKeys } = require('librechat-data-provider'); -const { loadDefaultEndpointsConfig } = require('~/server/services/Config'); +const { loadDefaultEndpointsConfig, loadConfigEndpoints } = require('~/server/services/Config'); const { getLogStores } = require('~/cache'); async function endpointController(req, res) { @@ -10,7 +10,11 @@ async function endpointController(req, res) { return; } - const endpointsConfig = await loadDefaultEndpointsConfig(); + const defaultEndpointsConfig = await loadDefaultEndpointsConfig(); + const customConfigEndpoints = await loadConfigEndpoints(); + + const endpointsConfig = { ...defaultEndpointsConfig, ...customConfigEndpoints }; + await cache.set(CacheKeys.ENDPOINT_CONFIG, endpointsConfig); res.send(JSON.stringify(endpointsConfig)); } diff --git a/api/server/controllers/ModelController.js b/api/server/controllers/ModelController.js index 607ddd9f25c..2d23961e154 100644 --- a/api/server/controllers/ModelController.js +++ b/api/server/controllers/ModelController.js @@ -1,15 +1,19 @@ const { CacheKeys } = require('librechat-data-provider'); -const { loadDefaultModels } = require('~/server/services/Config'); +const { loadDefaultModels, loadConfigModels } = require('~/server/services/Config'); const { getLogStores } = require('~/cache'); async function modelController(req, res) { const cache = getLogStores(CacheKeys.CONFIG_STORE); - let modelConfig = await cache.get(CacheKeys.MODELS_CONFIG); - if (modelConfig) { - res.send(modelConfig); + const cachedModelsConfig = await 
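/*
 * The merged models config assembled below maps every endpoint, built-in or
 * user-defined, to its model list, e.g. (illustrative values):
 *   { openAI: ['gpt-4', 'gpt-3.5-turbo'], Mistral: ['mistral-tiny'] }
 * so custom endpoints resolve their models by configured name.
 */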
cache.get(CacheKeys.MODELS_CONFIG); + if (cachedModelsConfig) { + res.send(cachedModelsConfig); return; } - modelConfig = await loadDefaultModels(); + const defaultModelsConfig = await loadDefaultModels(); + const customModelsConfig = await loadConfigModels(); + + const modelConfig = { ...defaultModelsConfig, ...customModelsConfig }; + await cache.set(CacheKeys.MODELS_CONFIG, modelConfig); res.send(modelConfig); } From ba9d068ebebd925ff809a241b1060ee293b03989 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Sun, 31 Dec 2023 16:26:15 -0500 Subject: [PATCH 17/59] chore(client): fix type issues --- .../src/components/Chat/Input/OptionsBar.tsx | 21 ++++--------------- .../Chat/Menus/Presets/EditPresetDialog.tsx | 1 - client/src/hooks/useDefaultConvo.ts | 4 ++-- client/src/hooks/useNewConvo.ts | 10 +++++++-- packages/data-provider/src/types.ts | 2 +- 5 files changed, 15 insertions(+), 23 deletions(-) diff --git a/client/src/components/Chat/Input/OptionsBar.tsx b/client/src/components/Chat/Input/OptionsBar.tsx index 3b49bf3b285..049542ca1c8 100644 --- a/client/src/components/Chat/Input/OptionsBar.tsx +++ b/client/src/components/Chat/Input/OptionsBar.tsx @@ -24,14 +24,8 @@ export default function OptionsBar({ messagesTree }) { store.showPluginStoreDialog, ); - const { - showPopover, - conversation, - latestMessage, - setShowPopover, - setShowBingToneSetting, - textareaHeight, - } = useChatContext(); + const { showPopover, conversation, latestMessage, setShowPopover, setShowBingToneSetting } = + useChatContext(); const { setOption } = useSetIndexOptions(); const { endpoint, conversationId, jailbreak } = conversation ?? {}; @@ -81,14 +75,7 @@ export default function OptionsBar({ messagesTree }) { ? altSettings[endpoint] : () => setShowPopover((prev) => !prev); return ( -
+
setShowPopover(false)} - PopoverButtons={} + PopoverButtons={} >
diff --git a/client/src/hooks/useDefaultConvo.ts b/client/src/hooks/useDefaultConvo.ts index f60f35a3dd8..59ff182d7a1 100644 --- a/client/src/hooks/useDefaultConvo.ts +++ b/client/src/hooks/useDefaultConvo.ts @@ -1,13 +1,13 @@ import { useRecoilValue } from 'recoil'; import { useGetEndpointsQuery } from 'librechat-data-provider/react-query'; -import type { TConversation, TPreset } from 'librechat-data-provider'; +import type { TConversation, TPreset, TEndpointsConfig } from 'librechat-data-provider'; import { getDefaultEndpoint, buildDefaultConvo } from '~/utils'; import store from '~/store'; type TDefaultConvo = { conversation: Partial; preset?: Partial | null }; const useDefaultConvo = () => { - const { data: endpointsConfig = {} } = useGetEndpointsQuery(); + const { data: endpointsConfig = {} as TEndpointsConfig } = useGetEndpointsQuery(); const modelsConfig = useRecoilValue(store.modelsConfig); const getDefaultConversation = ({ conversation, preset }: TDefaultConvo) => { diff --git a/client/src/hooks/useNewConvo.ts b/client/src/hooks/useNewConvo.ts index e92150765e3..02768c15246 100644 --- a/client/src/hooks/useNewConvo.ts +++ b/client/src/hooks/useNewConvo.ts @@ -7,7 +7,13 @@ import { useRecoilState, useRecoilValue, } from 'recoil'; -import type { TConversation, TSubmission, TPreset, TModelsConfig } from 'librechat-data-provider'; +import type { + TConversation, + TSubmission, + TPreset, + TModelsConfig, + TEndpointsConfig, +} from 'librechat-data-provider'; import { buildDefaultConvo, getDefaultEndpoint } from '~/utils'; import { useDeleteFilesMutation } from '~/data-provider'; import useOriginNavigate from './useOriginNavigate'; @@ -22,7 +28,7 @@ const useNewConvo = (index = 0) => { const [files, setFiles] = useRecoilState(store.filesByIndex(index)); const setSubmission = useSetRecoilState(store.submissionByIndex(index)); const resetLatestMessage = useResetRecoilState(store.latestMessageFamily(index)); - const { data: endpointsConfig = {} } = useGetEndpointsQuery(); + const { data: endpointsConfig = {} as TEndpointsConfig } = useGetEndpointsQuery(); const { mutateAsync } = useDeleteFilesMutation({ onSuccess: () => { diff --git a/packages/data-provider/src/types.ts b/packages/data-provider/src/types.ts index 2f8d0d58b0c..a1db8794b44 100644 --- a/packages/data-provider/src/types.ts +++ b/packages/data-provider/src/types.ts @@ -133,7 +133,7 @@ export type TConfig = { export type TModelsConfig = Record; -export type TEndpointsConfig = Record; +export type TEndpointsConfig = Record; export type TUpdateTokenCountResponse = { count: number; From ce69401318f57d6f00c155bf4fe9b894467f15bd Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Mon, 1 Jan 2024 02:06:07 -0500 Subject: [PATCH 18/59] WIP: first pass for multiple custom endpoints - add endpointType to Conversation schema - add update zod schemas for both convo/presets to allow non-EModelEndpoint value as endpoint (also using type assertion) - use `endpointType` value as `endpoint` where mapping to type is necessary using this field - use custom defined `endpoint` value and not type for mapping to modelsConfig - misc: add return type to `getDefaultEndpoint` - in `useNewConvo`, add the endpointType if it wasn't already added to conversation - EndpointsMenu: use user-defined endpoint name as Title in menu - TODO: custom icon via custom config, change unknown to robot icon --- api/models/schema/convoSchema.js | 10 +-- client/src/components/Chat/Input/ChatForm.tsx | 6 +- .../components/Chat/Input/HeaderOptions.tsx | 12 ++- 
.../src/components/Chat/Input/OptionsBar.tsx | 12 ++- .../components/Chat/Input/PopoverButtons.tsx | 3 +- client/src/components/Chat/Landing.tsx | 7 +- .../components/Chat/Menus/EndpointsMenu.tsx | 2 +- .../Chat/Messages/Content/EditMessage.tsx | 3 +- .../components/Chat/Messages/HoverButtons.tsx | 3 +- .../components/Endpoints/EndpointSettings.tsx | 5 +- .../Input/ModelSelect/ModelSelect.tsx | 6 +- client/src/hooks/useNewConvo.ts | 4 + client/src/hooks/useSSE.ts | 6 +- client/src/hooks/useSetIndexOptions.ts | 85 +++++++++++-------- client/src/utils/buildDefaultConvo.ts | 6 +- client/src/utils/getDefaultEndpoint.ts | 9 +- client/src/utils/mapEndpoints.ts | 37 +++++--- packages/data-provider/src/schemas.ts | 33 +++++-- 18 files changed, 166 insertions(+), 83 deletions(-) diff --git a/api/models/schema/convoSchema.js b/api/models/schema/convoSchema.js index 46555ba3534..38c6faf53b3 100644 --- a/api/models/schema/convoSchema.js +++ b/api/models/schema/convoSchema.js @@ -10,6 +10,9 @@ const convoSchema = mongoose.Schema( index: true, meiliIndex: true, }, + endpointType: { + type: String, + }, title: { type: String, default: 'New Chat', @@ -18,36 +21,29 @@ const convoSchema = mongoose.Schema( user: { type: String, index: true, - // default: null, }, messages: [{ type: mongoose.Schema.Types.ObjectId, ref: 'Message' }], // google only examples: [{ type: mongoose.Schema.Types.Mixed }], agentOptions: { type: mongoose.Schema.Types.Mixed, - // default: null, }, ...conversationPreset, // for bingAI only bingConversationId: { type: String, - // default: null, }, jailbreakConversationId: { type: String, - // default: null, }, conversationSignature: { type: String, - // default: null, }, clientId: { type: String, - // default: null, }, invocationId: { type: Number, - // default: 1, }, }, { timestamps: true }, diff --git a/client/src/components/Chat/Input/ChatForm.tsx b/client/src/components/Chat/Input/ChatForm.tsx index 9fce650c83d..efd6285d299 100644 --- a/client/src/components/Chat/Input/ChatForm.tsx +++ b/client/src/components/Chat/Input/ChatForm.tsx @@ -30,6 +30,8 @@ export default function ChatForm({ index = 0 }) { }; const { requiresKey } = useRequiresKey(); + const { endpoint: _endpoint, endpointType } = conversation ?? { endpoint: null }; + const endpoint = endpointType ?? _endpoint; return (
) => setText(e.target.value)} setText={setText} submitMessage={submitMessage} - endpoint={conversation?.endpoint} + endpoint={endpoint} /> - + {isSubmitting && showStopButton ? ( ) : ( diff --git a/client/src/components/Chat/Input/HeaderOptions.tsx b/client/src/components/Chat/Input/HeaderOptions.tsx index 20415d1d496..c8acdc7cc26 100644 --- a/client/src/components/Chat/Input/HeaderOptions.tsx +++ b/client/src/components/Chat/Input/HeaderOptions.tsx @@ -2,7 +2,8 @@ import { useRecoilState } from 'recoil'; import { Settings2 } from 'lucide-react'; import { Root, Anchor } from '@radix-ui/react-popover'; import { useState, useEffect, useMemo } from 'react'; -import { tPresetSchema, EModelEndpoint } from 'librechat-data-provider'; +import { tPresetUpdateSchema, EModelEndpoint } from 'librechat-data-provider'; +import type { TPreset } from 'librechat-data-provider'; import { EndpointSettings, SaveAsPresetDialog } from '~/components/Endpoints'; import { ModelSelect } from '~/components/Input/ModelSelect'; import { PluginStoreDialog } from '~/components'; @@ -24,7 +25,7 @@ export default function OptionsBar() { useChatContext(); const { setOption } = useSetIndexOptions(); - const { endpoint, conversationId, jailbreak } = conversation ?? {}; + const { endpoint, endpointType, conversationId, jailbreak } = conversation ?? {}; const altConditions: { [key: string]: boolean } = { bingAI: !!(latestMessage && conversation?.jailbreak && endpoint === 'bingAI'), @@ -106,7 +107,12 @@ export default function OptionsBar() { diff --git a/client/src/components/Chat/Input/PopoverButtons.tsx b/client/src/components/Chat/Input/PopoverButtons.tsx index 9116909b446..ce053648d34 100644 --- a/client/src/components/Chat/Input/PopoverButtons.tsx +++ b/client/src/components/Chat/Input/PopoverButtons.tsx @@ -27,7 +27,8 @@ export default function PopoverButtons({ setShowAgentSettings, } = useChatContext(); - const { model, endpoint } = conversation ?? {}; + const { model, endpoint: _endpoint, endpointType } = conversation ?? {}; + const endpoint = endpointType ?? _endpoint; const isGenerativeModel = model?.toLowerCase()?.includes('gemini'); const isChatModel = !isGenerativeModel && model?.toLowerCase()?.includes('chat'); const isTextModel = !isGenerativeModel && !isChatModel && /code|text/.test(model ?? ''); diff --git a/client/src/components/Chat/Landing.tsx b/client/src/components/Chat/Landing.tsx index cd6f9af4a8b..6a566d44827 100644 --- a/client/src/components/Chat/Landing.tsx +++ b/client/src/components/Chat/Landing.tsx @@ -1,10 +1,12 @@ import type { ReactNode } from 'react'; +import { useGetEndpointsQuery } from 'librechat-data-provider/react-query'; import { EModelEndpoint } from 'librechat-data-provider'; import { icons } from './Menus/Endpoints/Icons'; import { useChatContext } from '~/Providers'; import { useLocalize } from '~/hooks'; export default function Landing({ Header }: { Header?: ReactNode }) { + const { data: endpointsConfig } = useGetEndpointsQuery(); const { conversation } = useChatContext(); const localize = useLocalize(); let { endpoint } = conversation ?? {}; @@ -16,13 +18,16 @@ export default function Landing({ Header }: { Header?: ReactNode }) { ) { endpoint = EModelEndpoint.openAI; } + + const iconKey = endpointsConfig?.[endpoint ?? '']?.type ?? endpoint ?? 'unknown'; + return (
{Header && Header}
- {icons[endpoint ?? 'unknown']({ size: 41, className: 'h-2/3 w-2/3' })} + {icons[iconKey]({ size: 41, className: 'h-2/3 w-2/3' })}
diff --git a/client/src/components/Chat/Menus/EndpointsMenu.tsx b/client/src/components/Chat/Menus/EndpointsMenu.tsx index a4b5ed439ae..f7339ac2989 100644 --- a/client/src/components/Chat/Menus/EndpointsMenu.tsx +++ b/client/src/components/Chat/Menus/EndpointsMenu.tsx @@ -21,7 +21,7 @@ const EndpointsMenu: FC = () => { } return ( - +
(null); const { conversationId, parentMessageId, messageId } = message; - const { endpoint } = conversation ?? { endpoint: null }; + const { endpoint: _endpoint, endpointType } = conversation ?? { endpoint: null }; + const endpoint = endpointType ?? _endpoint; const updateMessageMutation = useUpdateMessageMutation(conversationId ?? ''); const localize = useLocalize(); diff --git a/client/src/components/Chat/Messages/HoverButtons.tsx b/client/src/components/Chat/Messages/HoverButtons.tsx index bed7ac63152..2166c72ae8f 100644 --- a/client/src/components/Chat/Messages/HoverButtons.tsx +++ b/client/src/components/Chat/Messages/HoverButtons.tsx @@ -28,7 +28,8 @@ export default function HoverButtons({ latestMessage, }: THoverButtons) { const localize = useLocalize(); - const { endpoint } = conversation ?? {}; + const { endpoint: _endpoint, endpointType } = conversation ?? {}; + const endpoint = endpointType ?? _endpoint; const [isCopied, setIsCopied] = useState(false); const { hideEditButton, regenerateEnabled, continueSupported } = useGenerationsByLatest({ isEditing, diff --git a/client/src/components/Endpoints/EndpointSettings.tsx b/client/src/components/Endpoints/EndpointSettings.tsx index 29b3e3dc30b..3a3c04f069b 100644 --- a/client/src/components/Endpoints/EndpointSettings.tsx +++ b/client/src/components/Endpoints/EndpointSettings.tsx @@ -17,8 +17,9 @@ export default function Settings({ } const { settings, multiViewSettings } = getSettings(isMultiChat); - const { endpoint } = conversation; - const models = modelsConfig?.[endpoint] ?? []; + const { endpoint: _endpoint, endpointType } = conversation; + const models = modelsConfig?.[_endpoint] ?? []; + const endpoint = endpointType ?? _endpoint; const OptionComponent = settings[endpoint]; if (OptionComponent) { diff --git a/client/src/components/Input/ModelSelect/ModelSelect.tsx b/client/src/components/Input/ModelSelect/ModelSelect.tsx index 3eef9d3c730..aeb35400814 100644 --- a/client/src/components/Input/ModelSelect/ModelSelect.tsx +++ b/client/src/components/Input/ModelSelect/ModelSelect.tsx @@ -28,9 +28,11 @@ export default function ModelSelect({ return null; } - const { endpoint } = conversation; + const { endpoint: _endpoint, endpointType } = conversation; + const models = modelsConfig?.[_endpoint] ?? []; + const endpoint = endpointType ?? _endpoint; + const OptionComponent = isMultiChat ? multiChatOptions[endpoint] : options[endpoint]; - const models = modelsConfig?.[endpoint] ?? []; if (!OptionComponent) { return null; diff --git a/client/src/hooks/useNewConvo.ts b/client/src/hooks/useNewConvo.ts index 02768c15246..8d8fd368038 100644 --- a/client/src/hooks/useNewConvo.ts +++ b/client/src/hooks/useNewConvo.ts @@ -68,6 +68,10 @@ const useNewConvo = (index = 0) => { endpointsConfig, }); + if (!conversation.endpointType && endpointsConfig[defaultEndpoint]?.type) { + conversation.endpointType = endpointsConfig[defaultEndpoint]?.type; + } + const models = modelsConfig?.[defaultEndpoint] ?? 
[]; conversation = buildDefaultConvo({ conversation, diff --git a/client/src/hooks/useSSE.ts b/client/src/hooks/useSSE.ts index e5258511c7f..26267296cc3 100644 --- a/client/src/hooks/useSSE.ts +++ b/client/src/hooks/useSSE.ts @@ -5,7 +5,7 @@ import { SSE, createPayload, tMessageSchema, - tConversationSchema, + tConvoUpdateSchema, EModelEndpoint, removeNullishValues, } from 'librechat-data-provider'; @@ -152,10 +152,10 @@ export default function useSSE(submission: TSubmission | null, index = 0) { let update = {} as TConversation; setConversation((prevState) => { - update = tConversationSchema.parse({ + update = tConvoUpdateSchema.parse({ ...prevState, conversationId, - }); + }) as TConversation; setStorage(update); return update; diff --git a/client/src/hooks/useSetIndexOptions.ts b/client/src/hooks/useSetIndexOptions.ts index 3de9fa78597..26d49556f8c 100644 --- a/client/src/hooks/useSetIndexOptions.ts +++ b/client/src/hooks/useSetIndexOptions.ts @@ -1,5 +1,11 @@ import { useRecoilValue, useSetRecoilState } from 'recoil'; -import { TPreset, TPlugin, tConversationSchema, EModelEndpoint } from 'librechat-data-provider'; +import { + TPreset, + TPlugin, + tConvoUpdateSchema, + EModelEndpoint, + TConversation, +} from 'librechat-data-provider'; import type { TSetExample, TSetOption, TSetOptionsPayload } from '~/common'; import usePresetIndexOptions from './usePresetIndexOptions'; import { useChatContext } from '~/Providers/ChatContext'; @@ -36,11 +42,12 @@ const useSetOptions: TUseSetOptions = (preset = false) => { setLastBingSettings({ ...lastBingSettings, jailbreak: newValue }); } - setConversation((prevState) => - tConversationSchema.parse({ - ...prevState, - ...update, - }), + setConversation( + (prevState) => + tConvoUpdateSchema.parse({ + ...prevState, + ...update, + }) as TConversation, ); }; @@ -51,11 +58,12 @@ const useSetOptions: TUseSetOptions = (preset = false) => { currentExample[type] = { content: newValue }; current[i] = currentExample; update['examples'] = current; - setConversation((prevState) => - tConversationSchema.parse({ - ...prevState, - ...update, - }), + setConversation( + (prevState) => + tConvoUpdateSchema.parse({ + ...prevState, + ...update, + }) as TConversation, ); }; @@ -64,11 +72,12 @@ const useSetOptions: TUseSetOptions = (preset = false) => { const current = conversation?.examples?.slice() || []; current.push({ input: { content: '' }, output: { content: '' } }); update['examples'] = current; - setConversation((prevState) => - tConversationSchema.parse({ - ...prevState, - ...update, - }), + setConversation( + (prevState) => + tConvoUpdateSchema.parse({ + ...prevState, + ...update, + }) as TConversation, ); }; @@ -77,21 +86,23 @@ const useSetOptions: TUseSetOptions = (preset = false) => { const current = conversation?.examples?.slice() || []; if (current.length <= 1) { update['examples'] = [{ input: { content: '' }, output: { content: '' } }]; - setConversation((prevState) => - tConversationSchema.parse({ - ...prevState, - ...update, - }), + setConversation( + (prevState) => + tConvoUpdateSchema.parse({ + ...prevState, + ...update, + }) as TConversation, ); return; } current.pop(); update['examples'] = current; - setConversation((prevState) => - tConversationSchema.parse({ - ...prevState, - ...update, - }), + setConversation( + (prevState) => + tConvoUpdateSchema.parse({ + ...prevState, + ...update, + }) as TConversation, ); }; @@ -113,11 +124,12 @@ const useSetOptions: TUseSetOptions = (preset = false) => { lastModelUpdate.secondaryModel = newValue; 
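      // Note: tConvoUpdateSchema differs from tConversationSchema only by
      // widening `endpoint` to any string (extendedModelEndpointSchema), so
      // these parses accept user-defined custom endpoint names; the
      // `as TConversation` assertion then narrows the result for state.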
setLastModel(lastModelUpdate); } - setConversation((prevState) => - tConversationSchema.parse({ - ...prevState, - agentOptions, - }), + setConversation( + (prevState) => + tConvoUpdateSchema.parse({ + ...prevState, + agentOptions, + }) as TConversation, ); }; @@ -139,11 +151,12 @@ const useSetOptions: TUseSetOptions = (preset = false) => { } localStorage.setItem('lastSelectedTools', JSON.stringify(update['tools'])); - setConversation((prevState) => - tConversationSchema.parse({ - ...prevState, - ...update, - }), + setConversation( + (prevState) => + tConvoUpdateSchema.parse({ + ...prevState, + ...update, + }) as TConversation, ); }; diff --git a/client/src/utils/buildDefaultConvo.ts b/client/src/utils/buildDefaultConvo.ts index 2ea4d006b3a..fae95485e92 100644 --- a/client/src/utils/buildDefaultConvo.ts +++ b/client/src/utils/buildDefaultConvo.ts @@ -15,10 +15,12 @@ const buildDefaultConvo = ({ }) => { const { lastSelectedModel, lastSelectedTools, lastBingSettings } = getLocalStorageItems(); const { jailbreak, toneStyle } = lastBingSettings; + const { endpointType } = conversation; if (!endpoint) { return { ...conversation, + endpointType, endpoint, }; } @@ -44,13 +46,15 @@ const buildDefaultConvo = ({ secondaryModels = [...availableModels]; } - const convo = parseConvo(endpoint, lastConversationSetup, { + const convo = parseConvo(endpointType ?? endpoint, lastConversationSetup, { models: possibleModels, secondaryModels, }); + const defaultConvo = { ...conversation, ...convo, + endpointType, endpoint, }; diff --git a/client/src/utils/getDefaultEndpoint.ts b/client/src/utils/getDefaultEndpoint.ts index bdfb7b6470b..a70b20c159e 100644 --- a/client/src/utils/getDefaultEndpoint.ts +++ b/client/src/utils/getDefaultEndpoint.ts @@ -1,4 +1,9 @@ -import type { TConversation, TPreset, TEndpointsConfig } from 'librechat-data-provider'; +import type { + TConversation, + TPreset, + TEndpointsConfig, + EModelEndpoint, +} from 'librechat-data-provider'; import getLocalStorageItems from './getLocalStorageItems'; import mapEndpoints from './mapEndpoints'; @@ -42,7 +47,7 @@ const getDefinedEndpoint = (endpointsConfig: TEndpointsConfig) => { return endpoints.find((e) => Object.hasOwn(endpointsConfig ?? 
{}, e)); }; -const getDefaultEndpoint = ({ convoSetup, endpointsConfig }: TDefaultEndpoint) => { +const getDefaultEndpoint = ({ convoSetup, endpointsConfig }: TDefaultEndpoint): EModelEndpoint => { return ( getEndpointFromSetup(convoSetup, endpointsConfig) || getEndpointFromLocalStorage(endpointsConfig) || diff --git a/client/src/utils/mapEndpoints.ts b/client/src/utils/mapEndpoints.ts index 1902971a01d..74460e0dfe9 100644 --- a/client/src/utils/mapEndpoints.ts +++ b/client/src/utils/mapEndpoints.ts @@ -1,20 +1,37 @@ import { defaultEndpoints } from 'librechat-data-provider'; -import type { TEndpointsConfig } from 'librechat-data-provider'; +import type { EModelEndpoint, TEndpointsConfig } from 'librechat-data-provider'; -const getEndpointsFilter = (config: TEndpointsConfig) => { +const getEndpointsFilter = (endpointsConfig: TEndpointsConfig) => { const filter: Record = {}; - for (const key of Object.keys(config)) { - filter[key] = !!config[key]; + for (const key of Object.keys(endpointsConfig)) { + filter[key] = !!endpointsConfig[key]; } return filter; }; -const getAvailableEndpoints = (filter: Record) => { - const endpoints = defaultEndpoints; - return endpoints.filter((endpoint) => filter[endpoint]); +const getAvailableEndpoints = ( + filter: Record, + endpointsConfig: TEndpointsConfig, +) => { + const defaultSet = new Set(defaultEndpoints); + const availableEndpoints: EModelEndpoint[] = []; + + for (const endpoint in endpointsConfig) { + // Check if endpoint is in the filter or its type is in defaultEndpoints + if ( + filter[endpoint] || + (endpointsConfig[endpoint]?.type && defaultSet.has(endpointsConfig[endpoint].type)) + ) { + availableEndpoints.push(endpoint as EModelEndpoint); + } + } + + return availableEndpoints; }; -export default function mapEndpoints(config: TEndpointsConfig) { - const filter = getEndpointsFilter(config); - return getAvailableEndpoints(filter).sort((a, b) => config[a].order - config[b].order); +export default function mapEndpoints(endpointsConfig: TEndpointsConfig) { + const filter = getEndpointsFilter(endpointsConfig); + return getAvailableEndpoints(filter, endpointsConfig).sort( + (a, b) => (endpointsConfig[a]?.order ?? 0) - (endpointsConfig[b]?.order ?? 
0), + ); } diff --git a/packages/data-provider/src/schemas.ts b/packages/data-provider/src/schemas.ts index e1ad70657a0..1bfa3a29517 100644 --- a/packages/data-provider/src/schemas.ts +++ b/packages/data-provider/src/schemas.ts @@ -159,6 +159,8 @@ export const visionModels = ['gpt-4-vision', 'llava-13b', 'gemini-pro-vision']; export const eModelEndpointSchema = z.nativeEnum(EModelEndpoint); +export const extendedModelEndpointSchema = z.union([eModelEndpointSchema, z.string()]); + export const tPluginAuthConfigSchema = z.object({ authField: z.string(), label: z.string(), @@ -261,6 +263,7 @@ export const tConversationSchema = z.object({ title: z.string().nullable().or(z.literal('New Chat')).default('New Chat'), user: z.string().optional(), endpoint: eModelEndpointSchema.nullable(), + endpointType: eModelEndpointSchema.optional(), suggestions: z.array(z.string()).optional(), messages: z.array(z.string()).optional(), tools: z.array(tPluginSchema).optional(), @@ -313,6 +316,18 @@ export const tPresetSchema = tConversationSchema }), ); +export const tConvoUpdateSchema = tConversationSchema.merge( + z.object({ + endpoint: extendedModelEndpointSchema.nullable(), + }), +); + +export const tPresetUpdateSchema = tConversationSchema.merge( + z.object({ + endpoint: extendedModelEndpointSchema.nullable(), + }), +); + export type TPreset = z.infer; type DefaultSchemaValues = Partial; @@ -629,13 +644,17 @@ export type TPossibleValues = { secondaryModels?: string[]; }; -export const parseConvo = ( - endpoint: EModelEndpoint, - conversation: Partial, - possibleValues?: TPossibleValues, - // TODO: POC for default schema - // defaultSchema?: Partial, -) => { +export const parseConvo = ({ + endpoint, + conversation, + possibleValues, +}: // TODO: POC for default schema +// defaultSchema?: Partial, +{ + endpoint: EModelEndpoint; + conversation: Partial; + possibleValues?: TPossibleValues; +}) => { const schema = endpointSchemas[endpoint]; if (!schema) { From e4c0cf41d22efe4fda2507aa7691e3f3ded26005 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Mon, 1 Jan 2024 02:17:16 -0500 Subject: [PATCH 19/59] refactor(parseConvo): pass args as an object and change where used accordingly; chore: comment out 'create schema' code --- api/server/middleware/buildEndpointOption.js | 4 +- client/src/utils/buildDefaultConvo.ts | 11 +- client/src/utils/cleanupPreset.ts | 5 +- packages/data-provider/src/schemas.ts | 114 ++++++++++--------- 4 files changed, 72 insertions(+), 62 deletions(-) diff --git a/api/server/middleware/buildEndpointOption.js b/api/server/middleware/buildEndpointOption.js index a72566aead9..06f70935f37 100644 --- a/api/server/middleware/buildEndpointOption.js +++ b/api/server/middleware/buildEndpointOption.js @@ -15,8 +15,8 @@ const buildFunction = { }; function buildEndpointOption(req, res, next) { - const { endpoint } = req.body; - const parsedBody = parseConvo(endpoint, req.body); + const { endpoint, endpointType } = req.body; + const parsedBody = parseConvo({ endpoint, conversation: req.body, endpointType }); req.body.endpointOption = buildFunction[endpoint](endpoint, parsedBody); if (req.body.files) { // hold the promise diff --git a/client/src/utils/buildDefaultConvo.ts b/client/src/utils/buildDefaultConvo.ts index fae95485e92..bf1032c9046 100644 --- a/client/src/utils/buildDefaultConvo.ts +++ b/client/src/utils/buildDefaultConvo.ts @@ -46,9 +46,14 @@ const buildDefaultConvo = ({ secondaryModels = [...availableModels]; } - const convo = parseConvo(endpointType ?? 
endpoint, lastConversationSetup, { - models: possibleModels, - secondaryModels, + const convo = parseConvo({ + endpoint, + endpointType, + conversation: lastConversationSetup, + possibleValues: { + models: possibleModels, + secondaryModels, + }, }); const defaultConvo = { diff --git a/client/src/utils/cleanupPreset.ts b/client/src/utils/cleanupPreset.ts index 22bd3273400..ffff879a601 100644 --- a/client/src/utils/cleanupPreset.ts +++ b/client/src/utils/cleanupPreset.ts @@ -6,7 +6,7 @@ type TCleanupPreset = { }; const cleanupPreset = ({ preset: _preset }: TCleanupPreset): TPreset => { - const { endpoint } = _preset; + const { endpoint, endpointType } = _preset; if (!endpoint) { console.error(`Unknown endpoint ${endpoint}`, _preset); return { @@ -16,12 +16,13 @@ const cleanupPreset = ({ preset: _preset }: TCleanupPreset): TPreset => { }; } - const parsedPreset = parseConvo(endpoint, _preset); + const parsedPreset = parseConvo({ endpoint, endpointType, conversation: _preset }); return { presetId: _preset?.presetId ?? null, ...parsedPreset, endpoint, + endpointType, title: _preset?.title ?? 'New Preset', } as TPreset; }; diff --git a/packages/data-provider/src/schemas.ts b/packages/data-provider/src/schemas.ts index 1bfa3a29517..04ccea42c83 100644 --- a/packages/data-provider/src/schemas.ts +++ b/packages/data-provider/src/schemas.ts @@ -330,7 +330,7 @@ export const tPresetUpdateSchema = tConversationSchema.merge( export type TPreset = z.infer; -type DefaultSchemaValues = Partial; +// type DefaultSchemaValues = Partial; export const openAISchema = tConversationSchema .pick({ @@ -409,55 +409,55 @@ export const googleSchema = tConversationSchema topK: google.topK.default, })); -const createGoogleSchema = (customGoogle: DefaultSchemaValues) => { - const defaults = { ...google, ...customGoogle }; - return tConversationSchema - .pick({ - model: true, - modelLabel: true, - promptPrefix: true, - examples: true, - temperature: true, - maxOutputTokens: true, - topP: true, - topK: true, - }) - .transform((obj) => { - const isGeminiPro = obj?.model?.toLowerCase()?.includes('gemini-pro'); - - const maxOutputTokensMax = isGeminiPro - ? defaults.maxOutputTokens.maxGeminiPro - : defaults.maxOutputTokens.max; - const maxOutputTokensDefault = isGeminiPro - ? defaults.maxOutputTokens.defaultGeminiPro - : defaults.maxOutputTokens.default; - - let maxOutputTokens = obj.maxOutputTokens ?? maxOutputTokensDefault; - maxOutputTokens = Math.min(maxOutputTokens, maxOutputTokensMax); - - return { - ...obj, - model: obj.model ?? defaults.model.default, - modelLabel: obj.modelLabel ?? null, - promptPrefix: obj.promptPrefix ?? null, - examples: obj.examples ?? [{ input: { content: '' }, output: { content: '' } }], - temperature: obj.temperature ?? defaults.temperature.default, - maxOutputTokens, - topP: obj.topP ?? defaults.topP.default, - topK: obj.topK ?? 
defaults.topK.default, - }; - }) - .catch(() => ({ - model: defaults.model.default, - modelLabel: null, - promptPrefix: null, - examples: [{ input: { content: '' }, output: { content: '' } }], - temperature: defaults.temperature.default, - maxOutputTokens: defaults.maxOutputTokens.default, - topP: defaults.topP.default, - topK: defaults.topK.default, - })); -}; +// const createGoogleSchema = (customGoogle: DefaultSchemaValues) => { +// const defaults = { ...google, ...customGoogle }; +// return tConversationSchema +// .pick({ +// model: true, +// modelLabel: true, +// promptPrefix: true, +// examples: true, +// temperature: true, +// maxOutputTokens: true, +// topP: true, +// topK: true, +// }) +// .transform((obj) => { +// const isGeminiPro = obj?.model?.toLowerCase()?.includes('gemini-pro'); + +// const maxOutputTokensMax = isGeminiPro +// ? defaults.maxOutputTokens.maxGeminiPro +// : defaults.maxOutputTokens.max; +// const maxOutputTokensDefault = isGeminiPro +// ? defaults.maxOutputTokens.defaultGeminiPro +// : defaults.maxOutputTokens.default; + +// let maxOutputTokens = obj.maxOutputTokens ?? maxOutputTokensDefault; +// maxOutputTokens = Math.min(maxOutputTokens, maxOutputTokensMax); + +// return { +// ...obj, +// model: obj.model ?? defaults.model.default, +// modelLabel: obj.modelLabel ?? null, +// promptPrefix: obj.promptPrefix ?? null, +// examples: obj.examples ?? [{ input: { content: '' }, output: { content: '' } }], +// temperature: obj.temperature ?? defaults.temperature.default, +// maxOutputTokens, +// topP: obj.topP ?? defaults.topP.default, +// topK: obj.topK ?? defaults.topK.default, +// }; +// }) +// .catch(() => ({ +// model: defaults.model.default, +// modelLabel: null, +// promptPrefix: null, +// examples: [{ input: { content: '' }, output: { content: '' } }], +// temperature: defaults.temperature.default, +// maxOutputTokens: defaults.maxOutputTokens.default, +// topP: defaults.topP.default, +// topK: defaults.topK.default, +// })); +// }; export const bingAISchema = tConversationSchema .pick({ @@ -648,17 +648,21 @@ export const parseConvo = ({ endpoint, conversation, possibleValues, -}: // TODO: POC for default schema -// defaultSchema?: Partial, -{ + endpointType, +}: { endpoint: EModelEndpoint; conversation: Partial; possibleValues?: TPossibleValues; + endpointType?: EModelEndpoint; + // TODO: POC for default schema + // defaultSchema?: Partial, }) => { - const schema = endpointSchemas[endpoint]; + let schema = endpointSchemas[endpoint]; - if (!schema) { + if (!schema && !endpointType) { throw new Error(`Unknown endpoint: ${endpoint}`); + } else if (!schema && endpointType) { + schema = endpointSchemas[endpointType]; } // if (defaultSchema && schemaCreators[endpoint]) { From fe50b0937a84058a8ef2feb9f8783a7032675514 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Mon, 1 Jan 2024 02:23:47 -0500 Subject: [PATCH 20/59] chore: remove unused availableModels field in TConfig type --- packages/data-provider/src/types.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/data-provider/src/types.ts b/packages/data-provider/src/types.ts index a1db8794b44..a544e4310bd 100644 --- a/packages/data-provider/src/types.ts +++ b/packages/data-provider/src/types.ts @@ -120,7 +120,6 @@ export type TSearchResults = { }; export type TConfig = { - availableModels?: []; name?: string; userProvide?: boolean | null; userProvideURL?: boolean | null; From a18d8512bac46281c0d2d1cf8104a434587a2641 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Mon, 1 Jan 2024 02:24:20 -0500 Subject: 
[PATCH 21/59] refactor(parseCompactConvo): pass args as an object and change where used accordingly --- client/src/hooks/useChatHelpers.ts | 9 +++++++-- packages/data-provider/src/schemas.ts | 28 ++++++++++++++++++--------- 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/client/src/hooks/useChatHelpers.ts b/client/src/hooks/useChatHelpers.ts index bb702cb98f8..7a0793d3a6b 100644 --- a/client/src/hooks/useChatHelpers.ts +++ b/client/src/hooks/useChatHelpers.ts @@ -31,7 +31,7 @@ export default function useChatHelpers(index = 0, paramId: string | undefined) { const { newConversation } = useNewConvo(index); const { useCreateConversationAtom } = store; const { conversation, setConversation } = useCreateConversationAtom(index); - const { conversationId, endpoint } = conversation ?? {}; + const { conversationId, endpoint, endpointType } = conversation ?? {}; const queryParam = paramId === 'new' ? paramId : conversationId ?? paramId ?? ''; @@ -151,7 +151,12 @@ export default function useChatHelpers(index = 0, paramId: string | undefined) { const isEditOrContinue = isEdited || isContinued; // set the endpoint option - const convo = parseCompactConvo(endpoint, conversation ?? {}); + const convo = parseCompactConvo({ + endpoint, + endpointType, + conversation: conversation ?? {}, + }); + const endpointOption = { ...convo, endpoint, diff --git a/packages/data-provider/src/schemas.ts b/packages/data-provider/src/schemas.ts index 04ccea42c83..80f43855602 100644 --- a/packages/data-provider/src/schemas.ts +++ b/packages/data-provider/src/schemas.ts @@ -646,14 +646,14 @@ export type TPossibleValues = { export const parseConvo = ({ endpoint, + endpointType, conversation, possibleValues, - endpointType, }: { endpoint: EModelEndpoint; + endpointType?: EModelEndpoint; conversation: Partial; possibleValues?: TPossibleValues; - endpointType?: EModelEndpoint; // TODO: POC for default schema // defaultSchema?: Partial, }) => { @@ -933,19 +933,29 @@ const compactEndpointSchemas: Record = { gptPlugins: compactPluginsSchema, }; -export const parseCompactConvo = ( - endpoint: EModelEndpoint | undefined, - conversation: Partial, - possibleValues?: TPossibleValues, -) => { +export const parseCompactConvo = ({ + endpoint, + endpointType, + conversation, + possibleValues, +}: { + endpoint?: EModelEndpoint; + endpointType?: EModelEndpoint; + conversation: Partial; + possibleValues?: TPossibleValues; + // TODO: POC for default schema + // defaultSchema?: Partial, +}) => { if (!endpoint) { throw new Error(`undefined endpoint: ${endpoint}`); } - const schema = compactEndpointSchemas[endpoint]; + let schema = compactEndpointSchemas[endpoint]; - if (!schema) { + if (!schema && !endpointType) { throw new Error(`Unknown endpoint: ${endpoint}`); + } else if (!schema && endpointType) { + schema = compactEndpointSchemas[endpointType]; } const convo = schema.parse(conversation) as TConversation; From de8bba0cac8a507d09caaae58285c1e65944a13d Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Mon, 1 Jan 2024 12:09:59 -0500 Subject: [PATCH 22/59] feat: chat through custom endpoint --- api/cache/getCustomConfig.js | 23 +++++++++++++++++++ api/server/middleware/buildEndpointOption.js | 11 ++++++--- api/server/middleware/validateEndpoint.js | 3 ++- .../services/Endpoints/custom/buildOptions.js | 16 +++++++++++++ api/server/services/Endpoints/custom/index.js | 2 ++ .../Endpoints/custom/initializeClient.js | 22 ++++++++++++++---- client/src/hooks/useChatHelpers.ts | 1 + packages/data-provider/src/createPayload.ts | 11 
+++++---- 8 files changed, 77 insertions(+), 12 deletions(-) create mode 100644 api/cache/getCustomConfig.js create mode 100644 api/server/services/Endpoints/custom/buildOptions.js diff --git a/api/cache/getCustomConfig.js b/api/cache/getCustomConfig.js new file mode 100644 index 00000000000..62082c5cbae --- /dev/null +++ b/api/cache/getCustomConfig.js @@ -0,0 +1,23 @@ +const { CacheKeys } = require('librechat-data-provider'); +const loadCustomConfig = require('~/server/services/Config/loadCustomConfig'); +const getLogStores = require('./getLogStores'); + +/** + * Retrieves the configuration object + * @function getCustomConfig */ +async function getCustomConfig() { + const cache = getLogStores(CacheKeys.CONFIG_STORE); + let customConfig = await cache.get(CacheKeys.CUSTOM_CONFIG); + + if (!customConfig) { + customConfig = await loadCustomConfig(); + } + + if (!customConfig) { + return null; + } + + return customConfig; +} + +module.exports = getCustomConfig; diff --git a/api/server/middleware/buildEndpointOption.js b/api/server/middleware/buildEndpointOption.js index 06f70935f37..543815e3676 100644 --- a/api/server/middleware/buildEndpointOption.js +++ b/api/server/middleware/buildEndpointOption.js @@ -1,5 +1,6 @@ const { processFiles } = require('~/server/services/Files'); const openAI = require('~/server/services/Endpoints/openAI'); +const custom = require('~/server/services/Endpoints/custom'); const google = require('~/server/services/Endpoints/google'); const anthropic = require('~/server/services/Endpoints/anthropic'); const gptPlugins = require('~/server/services/Endpoints/gptPlugins'); @@ -8,7 +9,7 @@ const { parseConvo, EModelEndpoint } = require('librechat-data-provider'); const buildFunction = { [EModelEndpoint.openAI]: openAI.buildOptions, [EModelEndpoint.google]: google.buildOptions, - [EModelEndpoint.custom]: openAI.buildOptions, + [EModelEndpoint.custom]: custom.buildOptions, [EModelEndpoint.azureOpenAI]: openAI.buildOptions, [EModelEndpoint.anthropic]: anthropic.buildOptions, [EModelEndpoint.gptPlugins]: gptPlugins.buildOptions, @@ -16,8 +17,12 @@ const buildFunction = { function buildEndpointOption(req, res, next) { const { endpoint, endpointType } = req.body; - const parsedBody = parseConvo({ endpoint, conversation: req.body, endpointType }); - req.body.endpointOption = buildFunction[endpoint](endpoint, parsedBody); + const parsedBody = parseConvo({ endpoint, endpointType, conversation: req.body }); + req.body.endpointOption = buildFunction[endpointType ?? endpoint]( + endpoint, + parsedBody, + endpointType, + ); if (req.body.files) { // hold the promise req.body.endpointOption.attachments = processFiles(req.body.files); diff --git a/api/server/middleware/validateEndpoint.js b/api/server/middleware/validateEndpoint.js index 6e9c914c8eb..0eeaaeb97dc 100644 --- a/api/server/middleware/validateEndpoint.js +++ b/api/server/middleware/validateEndpoint.js @@ -1,7 +1,8 @@ const { handleError } = require('../utils'); function validateEndpoint(req, res, next) { - const { endpoint } = req.body; + const { endpoint: _endpoint, endpointType } = req.body; + const endpoint = endpointType ?? 
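/*
 * With custom endpoints, `endpoint` holds the user-defined name from the
 * config (e.g. 'Mistral', illustrative) and `endpointType` holds the family
 * ('custom'), so `endpointType ?? endpoint` yields the key used here for
 * validation and in buildEndpointOption for selecting buildOptions.
 */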
_endpoint; if (!req.body.text || req.body.text.length === 0) { return handleError(res, { text: 'Prompt empty or too short' }); diff --git a/api/server/services/Endpoints/custom/buildOptions.js b/api/server/services/Endpoints/custom/buildOptions.js new file mode 100644 index 00000000000..63a2d159924 --- /dev/null +++ b/api/server/services/Endpoints/custom/buildOptions.js @@ -0,0 +1,16 @@ +const buildOptions = (endpoint, parsedBody, endpointType) => { + const { chatGptLabel, promptPrefix, ...rest } = parsedBody; + const endpointOption = { + endpoint, + endpointType, + chatGptLabel, + promptPrefix, + modelOptions: { + ...rest, + }, + }; + + return endpointOption; +}; + +module.exports = buildOptions; diff --git a/api/server/services/Endpoints/custom/index.js b/api/server/services/Endpoints/custom/index.js index 9d78a35945e..3cda8d5fece 100644 --- a/api/server/services/Endpoints/custom/index.js +++ b/api/server/services/Endpoints/custom/index.js @@ -1,5 +1,7 @@ const initializeClient = require('./initializeClient'); +const buildOptions = require('./buildOptions'); module.exports = { initializeClient, + buildOptions, }; diff --git a/api/server/services/Endpoints/custom/initializeClient.js b/api/server/services/Endpoints/custom/initializeClient.js index 6ef4ec12f21..e10e6a73e31 100644 --- a/api/server/services/Endpoints/custom/initializeClient.js +++ b/api/server/services/Endpoints/custom/initializeClient.js @@ -1,12 +1,26 @@ const { EModelEndpoint } = require('librechat-data-provider'); const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService'); -const { isEnabled } = require('~/server/utils'); +const getCustomConfig = require('~/cache/getCustomConfig'); const { OpenAIClient } = require('~/app'); +const { PROXY } = process.env; + const initializeClient = async ({ req, res, endpointOption }) => { - const { PROXY, CUSTOM_API_KEY, CUSTOM_BASE_URL, CUSTOM_SUMMARIZE } = process.env; const { key: expiresAt, endpoint } = req.body; - const contextStrategy = isEnabled(CUSTOM_SUMMARIZE) ? 'summarize' : null; + const customConfig = await getCustomConfig(); + if (!customConfig) { + throw new Error(`Config not found for the ${endpoint} custom endpoint.`); + } + + const { endpoints = {} } = customConfig; + const customEndpoints = endpoints[EModelEndpoint.custom] ?? []; + const endpointConfig = customEndpoints.find((endpointConfig) => endpointConfig.name === endpoint); + + const CUSTOM_API_KEY = endpointConfig.apiKey; + const CUSTOM_BASE_URL = endpointConfig.baseURL; + + const contextStrategy = endpointConfig.summarize ? 'summarize' : null; + const clientOptions = { contextStrategy, reverseProxyUrl: CUSTOM_BASE_URL ?? 
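/*
 * The baseURL of the matched config entry doubles as the client's reverse
 * proxy URL, letting a custom endpoint chat over any OpenAI-compatible API;
 * a 'user_provided' apiKey or baseURL is expected to be resolved per user
 * via the imported getUserKey/checkUserKeyExpiry helpers.
 */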
null, @@ -17,7 +31,7 @@ const initializeClient = async ({ req, res, endpointOption }) => { }; const credentials = { - [EModelEndpoint.custom]: CUSTOM_API_KEY, + [endpoint]: CUSTOM_API_KEY, }; const isUserProvided = credentials[endpoint] === 'user_provided'; diff --git a/client/src/hooks/useChatHelpers.ts b/client/src/hooks/useChatHelpers.ts index 7a0793d3a6b..1a12d0524b9 100644 --- a/client/src/hooks/useChatHelpers.ts +++ b/client/src/hooks/useChatHelpers.ts @@ -160,6 +160,7 @@ export default function useChatHelpers(index = 0, paramId: string | undefined) { const endpointOption = { ...convo, endpoint, + endpointType, key: getExpiry(), } as TEndpointOption; const responseSender = getResponseSender({ model: conversation?.model, ...endpointOption }); diff --git a/packages/data-provider/src/createPayload.ts b/packages/data-provider/src/createPayload.ts index fd652065f64..e7a2fc46244 100644 --- a/packages/data-provider/src/createPayload.ts +++ b/packages/data-provider/src/createPayload.ts @@ -1,13 +1,16 @@ -import { tConversationSchema } from './schemas'; +import { tConvoUpdateSchema } from './schemas'; import type { TSubmission, TMessage, TEndpointOption } from './types'; import { EModelEndpoint, EndpointURLs } from './types'; export default function createPayload(submission: TSubmission) { const { conversation, message, messages, endpointOption, isEdited, isContinued } = submission; - const { conversationId } = tConversationSchema.parse(conversation); - const { endpoint } = endpointOption as { endpoint: EModelEndpoint }; + const { conversationId } = tConvoUpdateSchema.parse(conversation); + const { endpoint, endpointType } = endpointOption as { + endpoint: EModelEndpoint; + endpointType?: EModelEndpoint; + }; - let server = EndpointURLs[endpoint]; + let server = EndpointURLs[endpointType ?? 
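/*
 * The request URL is looked up by endpoint *type*, so every user-defined
 * endpoint posts to the shared route keyed by EModelEndpoint.custom, while
 * the user-defined name still travels in the payload as `endpoint`.
 */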
endpoint];

 if (isEdited && endpoint === EModelEndpoint.assistant) {
   server += '/modify';

From ba43f78cf8e77f82d693b4cbe9acac8a318e5213 Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Mon, 1 Jan 2024 12:27:08 -0500
Subject: [PATCH 23/59] chore(message/convoSchemas): avoid saving empty arrays

---
 api/models/schema/convoSchema.js   |  2 +-
 api/models/schema/messageSchema.js | 30 +++++++++++++++++-------------
 2 files changed, 18 insertions(+), 14 deletions(-)

diff --git a/api/models/schema/convoSchema.js b/api/models/schema/convoSchema.js
index 38c6faf53b3..2f84cdd714f 100644
--- a/api/models/schema/convoSchema.js
+++ b/api/models/schema/convoSchema.js
@@ -24,7 +24,7 @@ const convoSchema = mongoose.Schema(
     },
     messages: [{ type: mongoose.Schema.Types.ObjectId, ref: 'Message' }],
     // google only
-    examples: [{ type: mongoose.Schema.Types.Mixed }],
+    examples: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined },
     agentOptions: {
       type: mongoose.Schema.Types.Mixed,
     },
diff --git a/api/models/schema/messageSchema.js b/api/models/schema/messageSchema.js
index 33d799544b2..8e0b688d40f 100644
--- a/api/models/schema/messageSchema.js
+++ b/api/models/schema/messageSchema.js
@@ -82,22 +82,26 @@ const messageSchema = mongoose.Schema(
       select: false,
       default: false,
     },
-    files: [{ type: mongoose.Schema.Types.Mixed }],
+    files: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined },
     plugin: {
-      latest: {
-        type: String,
-        required: false,
-      },
-      inputs: {
-        type: [mongoose.Schema.Types.Mixed],
-        required: false,
-      },
-      outputs: {
-        type: String,
-        required: false,
+      type: {
+        latest: {
+          type: String,
+          required: false,
+        },
+        inputs: {
+          type: [mongoose.Schema.Types.Mixed],
+          required: false,
+          default: undefined,
+        },
+        outputs: {
+          type: String,
+          required: false,
+        },
       },
+      default: undefined,
     },
-    plugins: [{ type: mongoose.Schema.Types.Mixed }],
+    plugins: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined },
   },
   { timestamps: true },
 );

From e93b8d1aae3140e47c20c93c73cf00278f95d3f3 Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Mon, 1 Jan 2024 12:38:39 -0500
Subject: [PATCH 24/59] fix(BaseClient/saveMessageToDatabase): save endpointType

---
 api/app/clients/BaseClient.js | 1 +
 1 file changed, 1 insertion(+)

diff --git a/api/app/clients/BaseClient.js b/api/app/clients/BaseClient.js
index ea63a3ce904..17edf685574 100644
--- a/api/app/clients/BaseClient.js
+++ b/api/app/clients/BaseClient.js
@@ -520,6 +520,7 @@ class BaseClient {
       await saveConvo(user, {
         conversationId: message.conversationId,
         endpoint: this.options.endpoint,
+        endpointType: this.options.endpointType,
         ...endpointOptions,
       });
     }
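A note on the schema change in PATCH 23: Mongoose initializes bare array paths to `[]` and persists them on every insert, so wrapping the path and setting `default: undefined` is what keeps `files`, `plugins`, and `examples` off documents that never use them. A minimal sketch of the difference, reusing one field name for illustration (not the full LibreChat schemas):

import mongoose from 'mongoose';

// Bare array path: Mongoose casts the default to [] and saves it every time.
const eagerSchema = new mongoose.Schema({
  plugins: [{ type: mongoose.Schema.Types.Mixed }],
});

// Wrapped path with `default: undefined`: omitted until explicitly set.
const lazySchema = new mongoose.Schema({
  plugins: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined },
});

const Eager = mongoose.model('EagerExample', eagerSchema);
const Lazy = mongoose.model('LazyExample', lazySchema);

new Eager().toObject(); // { _id: ..., plugins: [] }
new Lazy().toObject(); // { _id: ... } (no `plugins` key is saved)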
From 050d94851ef52f8a35c0f6858c8ce49b5a2053e6 Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Mon, 1 Jan 2024 12:39:40 -0500
Subject: [PATCH 25/59] refactor(ChatRoute): show Spinner while endpointsQuery or
 modelsQuery are still loading, which is most apparent when models/remote config
 are fetched slowly on first serve

---
 client/src/routes/ChatRoute.tsx | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/client/src/routes/ChatRoute.tsx b/client/src/routes/ChatRoute.tsx
index 8bbb371e43e..de4f066cef3 100644
--- a/client/src/routes/ChatRoute.tsx
+++ b/client/src/routes/ChatRoute.tsx
@@ -9,6 +9,7 @@ import {
 import { useNewConvo, useConfigOverride } from '~/hooks';
 import ChatView from '~/components/Chat/ChatView';
 import useAuthRedirect from './useAuthRedirect';
+import { Spinner } from '~/components/svg';
 import store from '~/store';

 export default function ChatRoute() {
@@ -51,6 +52,10 @@ export default function ChatRoute() {
     // eslint-disable-next-line react-hooks/exhaustive-deps
   }, [initialConvoQuery.data, modelsQuery.data, endpointsQuery.data]);

+  if (endpointsQuery.isLoading || modelsQuery.isLoading) {
+    return <Spinner />;
+  }
+
   if (!isAuthenticated) {
     return null;
   }

From 13d2be69a8afb0b54d34b577d4af52b484783e54 Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Mon, 1 Jan 2024 13:19:35 -0500
Subject: [PATCH 26/59] fix(useConversation): assign endpointType if it's missing

---
 client/src/hooks/useConversation.ts | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/client/src/hooks/useConversation.ts b/client/src/hooks/useConversation.ts
index 6ed2bf229bc..d16493c2333 100644
--- a/client/src/hooks/useConversation.ts
+++ b/client/src/hooks/useConversation.ts
@@ -7,6 +7,7 @@ import type {
   TSubmission,
   TPreset,
   TModelsConfig,
+  TEndpointsConfig,
 } from 'librechat-data-provider';
 import { buildDefaultConvo, getDefaultEndpoint } from '~/utils';
 import useOriginNavigate from './useOriginNavigate';
@@ -18,7 +19,7 @@ const useConversation = () => {
   const setMessages = useSetRecoilState(store.messages);
   const setSubmission = useSetRecoilState(store.submission);
   const resetLatestMessage = useResetRecoilState(store.latestMessage);
-  const { data: endpointsConfig = {} } = useGetEndpointsQuery();
+  const { data: endpointsConfig = {} as TEndpointsConfig } = useGetEndpointsQuery();

   const switchToConversation = useRecoilCallback(
     ({ snapshot }) =>
@@ -37,6 +38,10 @@ const useConversation = () => {
         endpointsConfig,
       });

+      if (!conversation.endpointType && endpointsConfig[defaultEndpoint]?.type) {
+        conversation.endpointType = endpointsConfig[defaultEndpoint]?.type;
+      }
+
       const models = modelsConfig?.[defaultEndpoint] ?? [];
       conversation = buildDefaultConvo({
         conversation,

From ed3f232e89b10d3f0c4a55acc57fc412af6c5090 Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Mon, 1 Jan 2024 13:44:58 -0500
Subject: [PATCH 27/59] fix(SaveAsPreset): pass real endpoint and endpointType
 when saving Preset

---
 client/src/components/Chat/Input/HeaderOptions.tsx | 3 +--
 client/src/components/Chat/Input/OptionsBar.tsx    | 3 +--
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/client/src/components/Chat/Input/HeaderOptions.tsx b/client/src/components/Chat/Input/HeaderOptions.tsx
index c8acdc7cc26..1bfd4b142a2 100644
--- a/client/src/components/Chat/Input/HeaderOptions.tsx
+++ b/client/src/components/Chat/Input/HeaderOptions.tsx
@@ -25,7 +25,7 @@ export default function OptionsBar() {
     useChatContext();
   const { setOption } = useSetIndexOptions();

-  const { endpoint, endpointType, conversationId, jailbreak } = conversation ?? {};
+  const { endpoint, conversationId, jailbreak } = conversation ?? {};

   const altConditions: { [key: string]: boolean } = {
     bingAI: !!(latestMessage && conversation?.jailbreak && endpoint === 'bingAI'),
@@ -110,7 +110,6 @@ export default function OptionsBar() {
             preset={
               tPresetUpdateSchema.parse({
                 ...conversation,
-                endpoint: endpointType ?? endpoint,
               }) as TPreset
             }
           />
diff --git a/client/src/components/Chat/Input/OptionsBar.tsx b/client/src/components/Chat/Input/OptionsBar.tsx
index 6f346c8f742..667c886a32b 100644
--- a/client/src/components/Chat/Input/OptionsBar.tsx
+++ b/client/src/components/Chat/Input/OptionsBar.tsx
@@ -29,7 +29,7 @@ export default function OptionsBar({ messagesTree }) {
     useChatContext();
   const { setOption } = useSetIndexOptions();

-  const { endpoint, endpointType, conversationId, jailbreak } = conversation ?? {};
+  const { endpoint, conversationId, jailbreak } = conversation ?? {};

   const altConditions: { [key: string]: boolean } = {
     bingAI: !!(latestMessage && conversation?.jailbreak && endpoint === 'bingAI'),
@@ -155,7 +155,6 @@ export default function OptionsBar({ messagesTree }) {
             preset={
               tPresetUpdateSchema.parse({
                 ...conversation,
-                endpoint: endpointType ?? endpoint,
               }) as TPreset
             }
           />
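Together, PATCH 26 and 27 settle the contract that `endpoint` always carries the real (possibly config-defined) endpoint name while `endpointType` carries the implementation kind, rather than overwriting `endpoint` at save time. A rough sketch of why that matters for request routing, per the `createPayload` change earlier in the series (the endpoint name "Mistral" is an invented example):

import { EModelEndpoint, EndpointURLs } from 'librechat-data-provider';

// A conversation on a config-defined endpoint keeps its display name in
// `endpoint`; `endpointType` records which backend implementation serves it.
const option: { endpoint: string; endpointType?: EModelEndpoint } = {
  endpoint: 'Mistral', // assumed custom endpoint name from the config file
  endpointType: EModelEndpoint.custom,
};

// Mirrors createPayload: prefer endpointType, so the request goes to
// '/api/ask/custom' rather than failing to find EndpointURLs['Mistral'].
const server = EndpointURLs[option.endpointType ?? (option.endpoint as EModelEndpoint)];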
From 414359f1f97e2ed622f144ce520751b543381a98 Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Mon, 1 Jan 2024 17:17:41 -0500
Subject: [PATCH 28/59] chore: reorganize types order for TConfig, add `iconURL`

---
 packages/data-provider/src/types.ts | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/packages/data-provider/src/types.ts b/packages/data-provider/src/types.ts
index a544e4310bd..44fffcd4e5a 100644
--- a/packages/data-provider/src/types.ts
+++ b/packages/data-provider/src/types.ts
@@ -120,14 +120,15 @@ export type TSearchResults = {
 };

 export type TConfig = {
-  name?: string;
-  userProvide?: boolean | null;
-  userProvideURL?: boolean | null;
+  order: number;
   type?: EModelEndpoint;
+  azure?: boolean;
   availableTools?: [];
   plugins?: Record;
-  azure?: boolean;
-  order: number;
+  name?: string;
+  iconURL?: string;
+  userProvide?: boolean | null;
+  userProvideURL?: boolean | null;
 };

 export type TModelsConfig = Record;

From e8e25eded707dc3facb7b528c3d05514d5fb99e6 Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Mon, 1 Jan 2024 17:20:08 -0500
Subject: [PATCH 29/59] feat: custom endpoint icon support:
- use UnknownIcon in all icon contexts
- add mistral and openrouter as known endpoints, and add their icons
- iconURL support

---
 client/public/assets/mistral.png              | Bin 0 -> 548 bytes
 client/public/assets/openrouter.png           | Bin 0 -> 15406 bytes
 client/src/common/types.ts                    | 12 +++++-
 client/src/components/Chat/Landing.tsx        | 10 ++++-
 .../components/Chat/Menus/Endpoints/Icons.tsx |  3 +-
 .../Chat/Menus/Endpoints/MenuItem.tsx         | 17 ++++++++-
 .../Chat/Menus/Endpoints/UnknownIcon.tsx      | 36 ++++++++++++++++++
 .../Chat/Menus/Presets/PresetItems.tsx        | 11 +++++-
 .../src/components/Chat/Menus/PresetsMenu.tsx |  4 +-
 client/src/components/Conversations/Convo.tsx |  1 +
 client/src/components/Endpoints/Icon.tsx      |  6 ++-
 .../src/components/Endpoints/MinimalIcon.tsx  |  6 ++-
 packages/data-provider/src/schemas.ts         |  5 +++
 13 files changed, 100 insertions(+), 11 deletions(-)
 create mode 100644 client/public/assets/mistral.png
 create mode 100644 client/public/assets/openrouter.png
 create mode 100644 client/src/components/Chat/Menus/Endpoints/UnknownIcon.tsx

diff --git a/client/public/assets/mistral.png b/client/public/assets/mistral.png
new file mode 100644
index 0000000000000000000000000000000000000000..ff2f3e8b63bd8f193f84c4d070152beed8ee7e80
GIT binary patch
[548 bytes of base85-encoded image data omitted]

diff --git a/client/public/assets/openrouter.png b/client/public/assets/openrouter.png
new file mode 100644
GIT binary patch
[15406 bytes of base85-encoded image data omitted]

diff --git a/client/src/common/types.ts b/client/src/common/types.ts
 (newValue: number | string | boolean) => void;
@@ -196,7 +203,8 @@ export type IconProps = Pick &
   button?: boolean;
   message?: boolean;
   className?: string;
-  endpoint?: string | null;
+  endpoint?: EModelEndpoint | string | null;
+  endpointType?: EModelEndpoint | null;
 };

 export type Option = Record & {
diff --git a/client/src/components/Chat/Landing.tsx b/client/src/components/Chat/Landing.tsx
index 6a566d44827..fb38c38e435 100644
--- a/client/src/components/Chat/Landing.tsx
+++ b/client/src/components/Chat/Landing.tsx
@@ -19,7 +19,7 @@ export default function Landing({ Header }: { Header?: ReactNode }) {
     endpoint = EModelEndpoint.openAI;
   }

-  const iconKey = endpointsConfig?.[endpoint ?? '']?.type ?? endpoint ?? 'unknown';
+  const iconKey = endpointsConfig?.[endpoint ?? '']?.type ? 'unknown' : endpoint ?? 'unknown';

   return (
@@ -27,7 +27,13 @@ export default function Landing({ Header }: { Header?: ReactNode }) {
- {icons[iconKey]({ size: 41, className: 'h-2/3 w-2/3' })} + {icons[iconKey]({ + size: 41, + context: 'landing', + className: 'h-2/3 w-2/3', + endpoint: endpoint as EModelEndpoint | string, + iconURL: endpointsConfig?.[endpoint ?? ''].iconURL, + })}
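The `iconKey` change above inverts the old lookup: any endpoint whose server config carries a `type` (only set for config-defined, i.e. custom, endpoints) now resolves to the 'unknown' icon entry, while the actual endpoint name and optional `iconURL` travel along as props. A small sketch with invented config values:

// Shape mirrors the TConfig entries served by /api/endpoints; values invented.
const endpointsConfig: Record<string, { order: number; type?: string; iconURL?: string }> = {
  openAI: { order: 0 },
  Mistral: { order: 1, type: 'custom', iconURL: 'https://example.com/mistral.png' },
};

// Same ternary as the Landing change: a configured `type` routes to 'unknown'.
function getIconKey(endpoint?: string | null): string {
  return endpointsConfig[endpoint ?? '']?.type ? 'unknown' : endpoint ?? 'unknown';
}

getIconKey('openAI'); // 'openAI': rendered by the dedicated OpenAI icon
getIconKey('Mistral'); // 'unknown': rendered by UnknownIcon with endpoint/iconURL props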
diff --git a/client/src/components/Chat/Menus/Endpoints/Icons.tsx b/client/src/components/Chat/Menus/Endpoints/Icons.tsx index a1bffb420c2..4bfb593d837 100644 --- a/client/src/components/Chat/Menus/Endpoints/Icons.tsx +++ b/client/src/components/Chat/Menus/Endpoints/Icons.tsx @@ -9,6 +9,7 @@ import { CustomMinimalIcon, LightningIcon, } from '~/components/svg'; +import UnknownIcon from './UnknownIcon'; import { cn } from '~/utils'; export const icons = { @@ -41,5 +42,5 @@ export const icons = { > ), - unknown: GPTIcon, + unknown: UnknownIcon, }; diff --git a/client/src/components/Chat/Menus/Endpoints/MenuItem.tsx b/client/src/components/Chat/Menus/Endpoints/MenuItem.tsx index b555ad8d444..f79e6e20523 100644 --- a/client/src/components/Chat/Menus/Endpoints/MenuItem.tsx +++ b/client/src/components/Chat/Menus/Endpoints/MenuItem.tsx @@ -1,6 +1,7 @@ import { useState } from 'react'; import { Settings } from 'lucide-react'; import { EModelEndpoint } from 'librechat-data-provider'; +import { useGetEndpointsQuery } from 'librechat-data-provider/react-query'; import type { FC } from 'react'; import { useLocalize, useUserKey } from '~/hooks'; import { SetKeyDialog } from '~/components/Input/SetKeyDialog'; @@ -26,7 +27,8 @@ const MenuItem: FC = ({ userProvidesKey, ...rest }) => { - const Icon = icons[endpoint] ?? icons.unknown; + const { data: endpointsConfig } = useGetEndpointsQuery(); + const [isDialogOpen, setDialogOpen] = useState(false); const { newConversation } = useChatContext(); const { getExpiry } = useUserKey(endpoint); @@ -44,6 +46,9 @@ const MenuItem: FC = ({ } }; + const iconKey = endpointsConfig?.[endpoint ?? '']?.type ? 'unknown' : endpoint ?? 'unknown'; + const Icon = icons[iconKey]; + return ( <>
= ({
- {} + { + + }
{title}
{description}
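For reference, the fallback order implemented by the new `UnknownIcon` component introduced below is: an explicit `iconURL` from the endpoint config, then a lowercase match against `KnownEndpoints` (mistral, openrouter) for a bundled asset, then the generic `CustomMinimalIcon`. A usage sketch (endpoint names are hypothetical):

import UnknownIcon from '~/components/Chat/Menus/Endpoints/UnknownIcon';

// 1) Config supplied an iconURL: renders the remote image.
<UnknownIcon endpoint="My-LLM" iconURL="https://example.com/logo.png" context="landing" className="h-2/3 w-2/3" />;

// 2) No iconURL, but the name lowercases to a known endpoint: bundled asset.
<UnknownIcon endpoint="Mistral" context="menu-item" className="icon-md" />; // renders /assets/mistral.png

// 3) Anything else falls through to the minimal custom icon.
<UnknownIcon endpoint="local-llama" context="nav" className="icon-md" />;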
diff --git a/client/src/components/Chat/Menus/Endpoints/UnknownIcon.tsx b/client/src/components/Chat/Menus/Endpoints/UnknownIcon.tsx new file mode 100644 index 00000000000..496f627acad --- /dev/null +++ b/client/src/components/Chat/Menus/Endpoints/UnknownIcon.tsx @@ -0,0 +1,36 @@ +import { EModelEndpoint, KnownEndpoints } from 'librechat-data-provider'; +import { CustomMinimalIcon } from '~/components/svg'; + +export default function UnknownIcon({ + className = '', + endpoint, + iconURL, + context, +}: { + iconURL?: string; + className?: string; + endpoint: EModelEndpoint | string | null; + context?: 'landing' | 'menu-item' | 'nav' | 'message'; +}) { + if (!endpoint) { + return ; + } + + const currentEndpoint = endpoint.toLowerCase(); + + if (iconURL) { + return {`${endpoint}; + } else if (currentEndpoint === KnownEndpoints.mistral) { + return ( + Mistral AI Icon + ); + } else if (currentEndpoint === KnownEndpoints.openrouter) { + return OpenRouter Icon; + } + + return ; +} diff --git a/client/src/components/Chat/Menus/Presets/PresetItems.tsx b/client/src/components/Chat/Menus/Presets/PresetItems.tsx index bdbc7870bf2..3e30b6b2c5b 100644 --- a/client/src/components/Chat/Menus/Presets/PresetItems.tsx +++ b/client/src/components/Chat/Menus/Presets/PresetItems.tsx @@ -2,6 +2,7 @@ import { Trash2 } from 'lucide-react'; import { useRecoilValue } from 'recoil'; import { Close } from '@radix-ui/react-popover'; import { Flipper, Flipped } from 'react-flip-toolkit'; +import { useGetEndpointsQuery } from 'librechat-data-provider/react-query'; import type { FC } from 'react'; import type { TPreset } from 'librechat-data-provider'; import FileUpload from '~/components/Input/EndpointMenu/FileUpload'; @@ -31,6 +32,7 @@ const PresetItems: FC<{ clearAllPresets, onFileSelected, }) => { + const { data: endpointsConfig } = useGetEndpointsQuery(); const defaultPreset = useRecoilValue(store.defaultPreset); const localize = useLocalize(); return ( @@ -93,6 +95,10 @@ const PresetItems: FC<{ return null; } + const iconKey = endpointsConfig?.[preset.endpoint ?? '']?.type + ? 'unknown' + : preset.endpoint ?? 'unknown'; + return (
@@ -103,8 +109,11 @@ const PresetItems: FC<{ title={getPresetTitle(preset)} disableHover={true} onClick={() => onSelectPreset(preset)} - icon={icons[preset.endpoint ?? 'unknown']({ + icon={icons[iconKey]({ + context: 'menu-item', + iconURL: endpointsConfig?.[preset.endpoint ?? ''].iconURL, className: 'icon-md mr-1 dark:text-white', + endpoint: preset.endpoint, })} selected={false} data-testid={`preset-item-${preset}`} diff --git a/client/src/components/Chat/Menus/PresetsMenu.tsx b/client/src/components/Chat/Menus/PresetsMenu.tsx index bdd47895e92..01a34a10bc9 100644 --- a/client/src/components/Chat/Menus/PresetsMenu.tsx +++ b/client/src/components/Chat/Menus/PresetsMenu.tsx @@ -3,6 +3,7 @@ import { BookCopy } from 'lucide-react'; import { Content, Portal, Root, Trigger } from '@radix-ui/react-popover'; import { EditPresetDialog, PresetItems } from './Presets'; import { useLocalize, usePresets } from '~/hooks'; +import { useChatContext } from '~/Providers'; import { cn } from '~/utils'; const PresetsMenu: FC = () => { @@ -18,6 +19,7 @@ const PresetsMenu: FC = () => { submitPreset, exportPreset, } = usePresets(); + const { preset } = useChatContext(); const presets = presetsQuery.data || []; return ( @@ -64,7 +66,7 @@ const PresetsMenu: FC = () => {
- + {preset && } ); }; diff --git a/client/src/components/Conversations/Convo.tsx b/client/src/components/Conversations/Convo.tsx index 5e3cd822e73..67bb347d0d1 100644 --- a/client/src/components/Conversations/Convo.tsx +++ b/client/src/components/Conversations/Convo.tsx @@ -87,6 +87,7 @@ export default function Conversation({ conversation, retainView, toggleNav, i }) const icon = MinimalIcon({ size: 20, endpoint: conversation.endpoint, + endpointType: conversation.endpointType, model: conversation.model, error: false, className: 'mr-0', diff --git a/client/src/components/Endpoints/Icon.tsx b/client/src/components/Endpoints/Icon.tsx index 6b80ad0c170..f8ef0dbffcb 100644 --- a/client/src/components/Endpoints/Icon.tsx +++ b/client/src/components/Endpoints/Icon.tsx @@ -1,4 +1,5 @@ import { EModelEndpoint } from 'librechat-data-provider'; +import UnknownIcon from '~/components/Chat/Menus/Endpoints/UnknownIcon'; import { Plugin, GPTIcon, @@ -100,7 +101,10 @@ const Icon: React.FC = (props) => { name: 'Custom', }, null: { icon: , bg: 'grey', name: 'N/A' }, - default: { icon: , bg: 'grey', name: 'UNKNOWN' }, + default: { + icon: , + name: endpoint, + }, }; const { icon, bg, name } = diff --git a/client/src/components/Endpoints/MinimalIcon.tsx b/client/src/components/Endpoints/MinimalIcon.tsx index ebf13457da4..2bf05c9ffa5 100644 --- a/client/src/components/Endpoints/MinimalIcon.tsx +++ b/client/src/components/Endpoints/MinimalIcon.tsx @@ -1,4 +1,5 @@ import { EModelEndpoint } from 'librechat-data-provider'; +import UnknownIcon from '~/components/Chat/Menus/Endpoints/UnknownIcon'; import { AzureMinimalIcon, OpenAIMinimalIcon, @@ -39,7 +40,10 @@ const MinimalIcon: React.FC = (props) => { }, [EModelEndpoint.bingAI]: { icon: , name: 'BingAI' }, [EModelEndpoint.chatGPTBrowser]: { icon: , name: 'ChatGPT' }, - default: { icon: , name: 'UNKNOWN' }, + default: { + icon: , + name: endpoint, + }, }; const { icon, name } = endpointIcons[endpoint] ?? 
endpointIcons.default; diff --git a/packages/data-provider/src/schemas.ts b/packages/data-provider/src/schemas.ts index 80f43855602..de8a165c4f8 100644 --- a/packages/data-provider/src/schemas.ts +++ b/packages/data-provider/src/schemas.ts @@ -12,6 +12,11 @@ export enum EModelEndpoint { custom = 'custom', } +export enum KnownEndpoints { + mistral = 'mistral', + openrouter = 'openrouter', +} + export const defaultEndpoints: EModelEndpoint[] = [ EModelEndpoint.openAI, EModelEndpoint.assistant, From 16bb96f7a55d6b338dc4d8366483ab5e1e8fb7b2 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Mon, 1 Jan 2024 17:25:59 -0500 Subject: [PATCH 30/59] fix(presetSchema): move endpointType to default schema definitions shared between convoSchema and defaults --- api/models/schema/convoSchema.js | 3 --- api/models/schema/defaults.js | 3 +++ 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/api/models/schema/convoSchema.js b/api/models/schema/convoSchema.js index 2f84cdd714f..a282287eccb 100644 --- a/api/models/schema/convoSchema.js +++ b/api/models/schema/convoSchema.js @@ -10,9 +10,6 @@ const convoSchema = mongoose.Schema( index: true, meiliIndex: true, }, - endpointType: { - type: String, - }, title: { type: String, default: 'New Chat', diff --git a/api/models/schema/defaults.js b/api/models/schema/defaults.js index c3bcee5c739..feedf1019ae 100644 --- a/api/models/schema/defaults.js +++ b/api/models/schema/defaults.js @@ -5,6 +5,9 @@ const conversationPreset = { default: null, required: true, }, + endpointType: { + type: String, + }, // for azureOpenAI, openAI, chatGPTBrowser only model: { type: String, From 1a9640d3fc39744a7aafc4422310ed1c053672b0 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Mon, 1 Jan 2024 17:30:59 -0500 Subject: [PATCH 31/59] refactor(Settings/OpenAI): remove legacy `isOpenAI` flag --- .../components/Endpoints/Settings/OpenAI.tsx | 89 +++++++++---------- 1 file changed, 41 insertions(+), 48 deletions(-) diff --git a/client/src/components/Endpoints/Settings/OpenAI.tsx b/client/src/components/Endpoints/Settings/OpenAI.tsx index 062fd1e0bc9..77b224c8d37 100644 --- a/client/src/components/Endpoints/Settings/OpenAI.tsx +++ b/client/src/components/Endpoints/Settings/OpenAI.tsx @@ -1,6 +1,4 @@ import TextareaAutosize from 'react-textarea-autosize'; -import type { TModelSelectProps } from '~/common'; -import { ESide } from '~/common'; import { SelectDropDown, Input, @@ -10,9 +8,11 @@ import { HoverCard, HoverCardTrigger, } from '~/components/ui'; -import OptionHover from './OptionHover'; import { cn, defaultTextProps, optionText, removeFocusOutlines } from '~/utils/'; +import type { TModelSelectProps } from '~/common'; +import OptionHover from './OptionHover'; import { useLocalize } from '~/hooks'; +import { ESide } from '~/common'; export default function Settings({ conversation, setOption, models, readonly }: TModelSelectProps) { const localize = useLocalize(); @@ -28,9 +28,6 @@ export default function Settings({ conversation, setOption, models, readonly }: frequency_penalty: freqP, presence_penalty: presP, } = conversation; - const endpoint = conversation.endpoint || 'openAI'; - const isOpenAI = endpoint === 'openAI' || endpoint === 'azureOpenAI'; - const setModel = setOption('model'); const setChatGptLabel = setOption('chatGptLabel'); const setPromptPrefix = setOption('promptPrefix'); @@ -52,47 +49,43 @@ export default function Settings({ conversation, setOption, models, readonly }: containerClassName="flex w-full resize-none" />
- {isOpenAI && ( - <> -
- - setChatGptLabel(e.target.value ?? null)} - placeholder={localize('com_endpoint_openai_custom_name_placeholder')} - className={cn( - defaultTextProps, - 'dark:bg-gray-700 dark:hover:bg-gray-700/60 dark:focus:bg-gray-700', - 'flex h-10 max-h-10 w-full resize-none px-3 py-2', - removeFocusOutlines, - )} - /> -
-
- - setPromptPrefix(e.target.value ?? null)} - placeholder={localize('com_endpoint_openai_prompt_prefix_placeholder')} - className={cn( - defaultTextProps, - 'dark:bg-gray-700 dark:hover:bg-gray-700/60 dark:focus:bg-gray-700', - 'flex max-h-[138px] min-h-[100px] w-full resize-none px-3 py-2 ', - )} - /> -
- - )} +
+ + setChatGptLabel(e.target.value ?? null)} + placeholder={localize('com_endpoint_openai_custom_name_placeholder')} + className={cn( + defaultTextProps, + 'dark:bg-gray-700 dark:hover:bg-gray-700/60 dark:focus:bg-gray-700', + 'flex h-10 max-h-10 w-full resize-none px-3 py-2', + removeFocusOutlines, + )} + /> +
+
+ + setPromptPrefix(e.target.value ?? null)} + placeholder={localize('com_endpoint_openai_prompt_prefix_placeholder')} + className={cn( + defaultTextProps, + 'dark:bg-gray-700 dark:hover:bg-gray-700/60 dark:focus:bg-gray-700', + 'flex max-h-[138px] min-h-[100px] w-full resize-none px-3 py-2 ', + )} + /> +
@@ -101,7 +94,7 @@ export default function Settings({ conversation, setOption, models, readonly }: Date: Mon, 1 Jan 2024 17:51:57 -0500 Subject: [PATCH 32/59] fix(OpenAIClient): do not invoke abortCompletion on completion error --- api/app/clients/OpenAIClient.js | 1 - 1 file changed, 1 deletion(-) diff --git a/api/app/clients/OpenAIClient.js b/api/app/clients/OpenAIClient.js index 25287fa29b3..b6ade92668c 100644 --- a/api/app/clients/OpenAIClient.js +++ b/api/app/clients/OpenAIClient.js @@ -859,7 +859,6 @@ ${convo} (err instanceof OpenAI.OpenAIError && err?.message?.includes('missing finish_reason')) ) { logger.error('[OpenAIClient] Known OpenAI error:', err); - await abortController.abortCompletion(); return intermediateReply; } else if (err instanceof OpenAI.APIError) { if (intermediateReply) { From 2821c6213e1a24a772293b736baa92ee6a8e982b Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Mon, 1 Jan 2024 19:47:29 -0500 Subject: [PATCH 33/59] feat: add responseSender/label support for custom endpoints: - use defaultModelLabel field in endpointOption - add model defaults for custom endpoints in `getResponseSender` - add `useGetSender` hook which uses EndpointsQuery to determine `defaultModelLabel` - include defaultModelLabel from endpointConfig in custom endpoint client options - pass `endpointType` to `getResponseSender` --- api/app/clients/OpenAIClient.js | 2 ++ api/server/controllers/AskController.js | 7 +++++- api/server/controllers/EditController.js | 7 +++++- .../services/Config/loadConfigEndpoints.js | 3 ++- .../Endpoints/custom/initializeClient.js | 1 + client/src/hooks/Conversations/index.ts | 1 + .../src/hooks/Conversations/useGetSender.ts | 15 ++++++++++++ client/src/hooks/Input/useTextarea.ts | 10 ++++---- client/src/hooks/useChatHelpers.ts | 12 +++++++--- packages/data-provider/src/schemas.ts | 23 +++++++++++++++++-- packages/data-provider/src/types.ts | 1 + 11 files changed, 70 insertions(+), 12 deletions(-) create mode 100644 client/src/hooks/Conversations/useGetSender.ts diff --git a/api/app/clients/OpenAIClient.js b/api/app/clients/OpenAIClient.js index b6ade92668c..0909e90ad8c 100644 --- a/api/app/clients/OpenAIClient.js +++ b/api/app/clients/OpenAIClient.js @@ -147,7 +147,9 @@ class OpenAIClient extends BaseClient { getResponseSender({ model: this.modelOptions.model, endpoint: this.options.endpoint, + endpointType: this.options.endpointType, chatGptLabel: this.options.chatGptLabel, + defaultModelLabel: this.options.defaultModelLabel, }); this.userLabel = this.options.userLabel || 'User'; diff --git a/api/server/controllers/AskController.js b/api/server/controllers/AskController.js index 78933feebc1..ebdfcc64436 100644 --- a/api/server/controllers/AskController.js +++ b/api/server/controllers/AskController.js @@ -9,6 +9,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => { text, endpointOption, conversationId, + defaultModelLabel, parentMessageId = null, overrideParentMessageId = null, } = req.body; @@ -22,7 +23,11 @@ const AskController = async (req, res, next, initializeClient, addTitle) => { let responseMessageId; let lastSavedTimestamp = 0; let saveDelay = 100; - const sender = getResponseSender({ ...endpointOption, model: endpointOption.modelOptions.model }); + const sender = getResponseSender({ + ...endpointOption, + model: endpointOption.modelOptions.model, + defaultModelLabel, + }); const newConvo = !conversationId; const user = req.user.id; diff --git a/api/server/controllers/EditController.js 
b/api/server/controllers/EditController.js index 72ee58026a4..1cbba93ca75 100644 --- a/api/server/controllers/EditController.js +++ b/api/server/controllers/EditController.js @@ -10,6 +10,7 @@ const EditController = async (req, res, next, initializeClient) => { generation, endpointOption, conversationId, + defaultModelLabel, responseMessageId, isContinued = false, parentMessageId = null, @@ -29,7 +30,11 @@ const EditController = async (req, res, next, initializeClient) => { let promptTokens; let lastSavedTimestamp = 0; let saveDelay = 100; - const sender = getResponseSender({ ...endpointOption, model: endpointOption.modelOptions.model }); + const sender = getResponseSender({ + ...endpointOption, + model: endpointOption.modelOptions.model, + defaultModelLabel, + }); const userMessageId = parentMessageId; const user = req.user.id; diff --git a/api/server/services/Config/loadConfigEndpoints.js b/api/server/services/Config/loadConfigEndpoints.js index b91947ba266..b31addb3b5a 100644 --- a/api/server/services/Config/loadConfigEndpoints.js +++ b/api/server/services/Config/loadConfigEndpoints.js @@ -33,11 +33,12 @@ async function loadConfigEndpoints() { for (let i = 0; i < customEndpoints.length; i++) { const endpoint = customEndpoints[i]; - const { baseURL, apiKey, name } = endpoint; + const { baseURL, apiKey, name, defaultModelLabel } = endpoint; endpointsConfig[name] = { type: EModelEndpoint.custom, userProvide: isUserProvided(apiKey), userProvideURL: isUserProvided(baseURL), + defaultModelLabel, }; } } diff --git a/api/server/services/Endpoints/custom/initializeClient.js b/api/server/services/Endpoints/custom/initializeClient.js index e10e6a73e31..10950d47505 100644 --- a/api/server/services/Endpoints/custom/initializeClient.js +++ b/api/server/services/Endpoints/custom/initializeClient.js @@ -27,6 +27,7 @@ const initializeClient = async ({ req, res, endpointOption }) => { proxy: PROXY ?? null, req, res, + defaultModelLabel: endpointConfig.defaultModelLabel, ...endpointOption, }; diff --git a/client/src/hooks/Conversations/index.ts b/client/src/hooks/Conversations/index.ts index 666341ddd64..5a84f39f3ab 100644 --- a/client/src/hooks/Conversations/index.ts +++ b/client/src/hooks/Conversations/index.ts @@ -1 +1,2 @@ export { default as usePresets } from './usePresets'; +export { default as useGetSender } from './useGetSender'; diff --git a/client/src/hooks/Conversations/useGetSender.ts b/client/src/hooks/Conversations/useGetSender.ts new file mode 100644 index 00000000000..4afacfa530b --- /dev/null +++ b/client/src/hooks/Conversations/useGetSender.ts @@ -0,0 +1,15 @@ +import { useCallback } from 'react'; +import { getResponseSender } from 'librechat-data-provider'; +import { useGetEndpointsQuery } from 'librechat-data-provider/react-query'; +import type { TEndpointOption, TEndpointsConfig } from 'librechat-data-provider'; + +export default function useGetSender() { + const { data: endpointsConfig = {} as TEndpointsConfig } = useGetEndpointsQuery(); + return useCallback( + (endpointOption: TEndpointOption) => { + const { defaultModelLabel } = endpointsConfig[endpointOption.endpoint ?? ''] ?? 
{}; + return getResponseSender({ ...endpointOption, defaultModelLabel }); + }, + [endpointsConfig], + ); +} diff --git a/client/src/hooks/Input/useTextarea.ts b/client/src/hooks/Input/useTextarea.ts index 9d5e5cf8199..6705a8157b5 100644 --- a/client/src/hooks/Input/useTextarea.ts +++ b/client/src/hooks/Input/useTextarea.ts @@ -1,7 +1,8 @@ -import { useEffect, useRef } from 'react'; import debounce from 'lodash/debounce'; -import { TEndpointOption, getResponseSender } from 'librechat-data-provider'; +import { useEffect, useRef } from 'react'; +import { TEndpointOption } from 'librechat-data-provider'; import type { KeyboardEvent } from 'react'; +import useGetSender from '~/hooks/Conversations/useGetSender'; import { useChatContext } from '~/Providers/ChatContext'; import useFileHandling from '~/hooks/useFileHandling'; import useLocalize from '~/hooks/useLocalize'; @@ -14,6 +15,7 @@ export default function useTextarea({ setText, submitMessage, disabled = false } const isComposing = useRef(false); const inputRef = useRef(null); const { handleFiles } = useFileHandling(); + const getSender = useGetSender(); const localize = useLocalize(); const { conversationId, jailbreak } = conversation || {}; @@ -59,7 +61,7 @@ export default function useTextarea({ setText, submitMessage, disabled = false } return localize('com_endpoint_message_not_appendable'); } - const sender = getResponseSender(conversation as TEndpointOption); + const sender = getSender(conversation as TEndpointOption); return `${localize('com_endpoint_message')} ${sender ? sender : 'ChatGPT'}…`; }; @@ -82,7 +84,7 @@ export default function useTextarea({ setText, submitMessage, disabled = false } debouncedSetPlaceholder(); return () => debouncedSetPlaceholder.cancel(); - }, [conversation, disabled, latestMessage, isNotAppendable, localize]); + }, [conversation, disabled, latestMessage, isNotAppendable, localize, getSender]); const handleKeyDown = (e: KeyEvent) => { if (e.key === 'Enter' && isSubmitting) { diff --git a/client/src/hooks/useChatHelpers.ts b/client/src/hooks/useChatHelpers.ts index 1a12d0524b9..bffd4de8bb0 100644 --- a/client/src/hooks/useChatHelpers.ts +++ b/client/src/hooks/useChatHelpers.ts @@ -1,18 +1,20 @@ import { v4 } from 'uuid'; import { useCallback, useState } from 'react'; import { useQueryClient } from '@tanstack/react-query'; +import { QueryKeys, parseCompactConvo } from 'librechat-data-provider'; import { useRecoilState, useResetRecoilState, useSetRecoilState } from 'recoil'; -import { QueryKeys, parseCompactConvo, getResponseSender } from 'librechat-data-provider'; -import { useGetMessagesByConvoId } from 'librechat-data-provider/react-query'; +import { useGetMessagesByConvoId, useGetEndpointsQuery } from 'librechat-data-provider/react-query'; import type { TMessage, TSubmission, TEndpointOption, TConversation, + TEndpointsConfig, TGetConversationsResponse, } from 'librechat-data-provider'; import type { TAskFunction } from '~/common'; import useSetFilesToDelete from './useSetFilesToDelete'; +import useGetSender from './Conversations/useGetSender'; import { useAuthContext } from './AuthContext'; import useUserKey from './Input/useUserKey'; import useNewConvo from './useNewConvo'; @@ -20,10 +22,12 @@ import store from '~/store'; // this to be set somewhere else export default function useChatHelpers(index = 0, paramId: string | undefined) { + const { data: endpointsConfig = {} as TEndpointsConfig } = useGetEndpointsQuery(); const [files, setFiles] = useRecoilState(store.filesByIndex(index)); const 
[showStopButton, setShowStopButton] = useState(true);
   const [filesLoading, setFilesLoading] = useState(false);
   const setFilesToDelete = useSetFilesToDelete();
+  const getSender = useGetSender();

   const queryClient = useQueryClient();
   const { isAuthenticated } = useAuthContext();
@@ -157,13 +161,15 @@ export default function useChatHelpers(index = 0, paramId: string | undefined) {
       conversation: conversation ?? {},
     });

+    const { defaultModelLabel } = endpointsConfig[endpoint ?? ''] ?? {};
     const endpointOption = {
       ...convo,
       endpoint,
       endpointType,
+      defaultModelLabel,
       key: getExpiry(),
     } as TEndpointOption;
-    const responseSender = getResponseSender({ model: conversation?.model, ...endpointOption });
+    const responseSender = getSender({ model: conversation?.model, ...endpointOption });

     let currentMessages: TMessage[] | null = getMessages() ?? [];

diff --git a/packages/data-provider/src/schemas.ts b/packages/data-provider/src/schemas.ts
index de8a165c4f8..5b26d7e3586 100644
--- a/packages/data-provider/src/schemas.ts
+++ b/packages/data-provider/src/schemas.ts
@@ -690,6 +690,8 @@ export const parseConvo = ({

 export type TEndpointOption = {
   endpoint: EModelEndpoint;
+  endpointType?: EModelEndpoint;
+  defaultModelLabel?: string;
   model?: string | null;
   promptPrefix?: string;
   temperature?: number;
@@ -700,7 +702,8 @@
 };

 export const getResponseSender = (endpointOption: TEndpointOption): string => {
-  const { model, endpoint, chatGptLabel, modelLabel, jailbreak } = endpointOption;
+  const { model, endpoint, endpointType, defaultModelLabel, chatGptLabel, modelLabel, jailbreak } =
+    endpointOption;

   if (
     [
@@ -716,6 +719,8 @@ export const getResponseSender = (endpointOption: TEndpointOption): string => {
       return 'GPT-3.5';
     } else if (model && model.includes('gpt-4')) {
       return 'GPT-4';
+    } else if (model && model.includes('mistral')) {
+      return 'Mistral';
     }
     return alternateName[endpoint] ?? 'ChatGPT';
   }
@@ -740,9 +745,23 @@ export const getResponseSender = (endpointOption: TEndpointOption): string => {
     return 'PaLM2';
   }

-  if (endpoint === EModelEndpoint.custom) {
+  if (
+    endpoint === EModelEndpoint.custom ||
+    endpointType === EModelEndpoint.custom ||
+    !defaultEndpoints.includes(endpoint)
+  ) {
     if (modelLabel) {
       return modelLabel;
+    } else if (chatGptLabel) {
+      return chatGptLabel;
+    } else if (model && model.includes('mistral')) {
+      return 'Mistral';
+    } else if (model && model.includes('gpt-3')) {
+      return 'GPT-3.5';
+    } else if (model && model.includes('gpt-4')) {
+      return 'GPT-4';
+    } else if (defaultModelLabel) {
+      return defaultModelLabel;
     }

     return 'AI';
diff --git a/packages/data-provider/src/types.ts b/packages/data-provider/src/types.ts
index 44fffcd4e5a..4693929deb1 100644
--- a/packages/data-provider/src/types.ts
+++ b/packages/data-provider/src/types.ts
@@ -125,6 +125,7 @@ export type TConfig = {
   azure?: boolean;
   availableTools?: [];
   plugins?: Record;
+  defaultModelLabel?: string;
   name?: string;
   iconURL?: string;
   userProvide?: boolean | null;
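For custom endpoints, the sender label on responses now resolves through a precedence chain: `modelLabel`, then `chatGptLabel`, then model-name heuristics, then the endpoint's `defaultModelLabel`, and finally the literal 'AI'. For instance (the endpoint name and labels are invented):

import { EModelEndpoint, getResponseSender } from 'librechat-data-provider';

// A config-defined endpoint "Mistral" is not in defaultEndpoints,
// so the custom branch above applies.
getResponseSender({
  endpoint: 'Mistral' as EModelEndpoint,
  endpointType: EModelEndpoint.custom,
  model: 'mistral-tiny',
  defaultModelLabel: 'Mistral 7B',
}); // -> 'Mistral' (the model-name heuristic fires before defaultModelLabel)

getResponseSender({
  endpoint: 'Mistral' as EModelEndpoint,
  endpointType: EModelEndpoint.custom,
  model: 'some-other-model',
  defaultModelLabel: 'Mistral 7B',
}); // -> 'Mistral 7B' (falls through to the config's defaultModelLabel)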
From d12e5a04eb0e13ef194625b053dc769b97338793 Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Tue, 2 Jan 2024 08:04:39 -0500
Subject: [PATCH 34/59] feat(OpenAIClient): use custom options from config file

---
 api/app/clients/OpenAIClient.js               | 89 +++++++++++++----
 .../Endpoints/custom/initializeClient.js      | 13 ++-
 .../services/Endpoints/openAI/addTitle.js     |  4 +
 3 files changed, 84 insertions(+), 22 deletions(-)

diff --git a/api/app/clients/OpenAIClient.js b/api/app/clients/OpenAIClient.js
index 0909e90ad8c..dee80cef33e 100644
--- a/api/app/clients/OpenAIClient.js
+++ b/api/app/clients/OpenAIClient.js
@@ -94,10 +94,23 @@ class OpenAIClient extends BaseClient {
     }

     const { reverseProxyUrl: reverseProxy } = this.options;
+
+    if (
+      !this.useOpenRouter &&
+      reverseProxy &&
+      reverseProxy.includes('https://openrouter.ai/api/v1')
+    ) {
+      this.useOpenRouter = true;
+    }
+
     this.FORCE_PROMPT =
       isEnabled(OPENAI_FORCE_PROMPT) ||
       (reverseProxy && reverseProxy.includes('completions') && !reverseProxy.includes('chat'));

+    if (typeof this.options.forcePrompt === 'boolean') {
+      this.FORCE_PROMPT = this.options.forcePrompt;
+    }
+
     if (this.azure && process.env.AZURE_OPENAI_DEFAULT_MODEL) {
       this.azureEndpoint = genAzureChatCompletion(this.azure, this.modelOptions.model);
       this.modelOptions.model = process.env.AZURE_OPENAI_DEFAULT_MODEL;
@@ -532,6 +545,19 @@ class OpenAIClient extends BaseClient {
     return llm;
   }

+  /**
+   * Generates a concise title for a conversation based on the user's input text and response.
+   * Uses either specified method or starts with the OpenAI `functions` method (using LangChain).
+   * If the `functions` method fails, it falls back to the `completion` method,
+   * which involves sending a chat completion request with specific instructions for title generation.
+   *
+   * @param {Object} params - The parameters for the conversation title generation.
+   * @param {string} params.text - The user's input.
+   * @param {string} [params.responseText=''] - The AI's immediate response to the user.
+   *
+   * @returns {Promise<string>} A promise that resolves to the generated conversation title.
+   *                            In case of failure, it will return the default title, "New Chat".
+   */
   async titleConvo({ text, responseText = '' }) {
     let title = 'New Chat';
     const convo = `||>User:

     const { OPENAI_TITLE_MODEL } = process.env ?? {};

+    const model = this.options.titleModel ?? OPENAI_TITLE_MODEL ??
'gpt-3.5-turbo'; + const modelOptions = { - model: OPENAI_TITLE_MODEL ?? 'gpt-3.5-turbo', + // TODO: remove the gpt fallback and make it specific to endpoint + model, temperature: 0.2, presence_penalty: 0, frequency_penalty: 0, max_tokens: 16, }; - try { - this.abortController = new AbortController(); - const llm = this.initializeLLM({ ...modelOptions, context: 'title', tokenBuffer: 150 }); - title = await runTitleChain({ llm, text, convo, signal: this.abortController.signal }); - } catch (e) { - if (e?.message?.toLowerCase()?.includes('abort')) { - logger.debug('[OpenAIClient] Aborted title generation'); - return; - } - logger.error( - '[OpenAIClient] There was an issue generating title with LangChain, trying the old method...', - e, - ); - modelOptions.model = OPENAI_TITLE_MODEL ?? 'gpt-3.5-turbo'; + const titleChatCompletion = async () => { + modelOptions.model = model; + if (this.azure) { modelOptions.model = process.env.AZURE_OPENAI_DEFAULT_MODEL ?? modelOptions.model; this.azureEndpoint = genAzureChatCompletion(this.azure, modelOptions.model); } + const instructionsPayload = [ { role: 'system', @@ -580,10 +599,39 @@ ${convo} ]; try { - title = (await this.sendPayload(instructionsPayload, { modelOptions })).replaceAll('"', ''); + const onProgress = () => ({}); + title = ( + await this.sendPayload(instructionsPayload, { modelOptions, onProgress }) + ).replaceAll('"', ''); } catch (e) { - logger.error('[OpenAIClient] There was another issue generating the title', e); + logger.error( + '[OpenAIClient] There was an issue generating the title with the completion method', + e, + ); } + }; + + if (this.options.titleMethod === 'completion') { + await titleChatCompletion(); + logger.debug('[OpenAIClient] Convo Title: ' + title); + return title; + } + + try { + this.abortController = new AbortController(); + const llm = this.initializeLLM({ ...modelOptions, context: 'title', tokenBuffer: 150 }); + title = await runTitleChain({ llm, text, convo, signal: this.abortController.signal }); + } catch (e) { + if (e?.message?.toLowerCase()?.includes('abort')) { + logger.debug('[OpenAIClient] Aborted title generation'); + return; + } + logger.error( + '[OpenAIClient] There was an issue generating title with LangChain, trying completion method...', + e, + ); + + await titleChatCompletion(); } logger.debug('[OpenAIClient] Convo Title: ' + title); @@ -595,8 +643,11 @@ ${convo} let context = messagesToRefine; let prompt; + // TODO: remove the gpt fallback and make it specific to endpoint const { OPENAI_SUMMARY_MODEL = 'gpt-3.5-turbo' } = process.env ?? {}; - const maxContextTokens = getModelMaxTokens(OPENAI_SUMMARY_MODEL) ?? 4095; + const model = this.options.summaryModel ?? OPENAI_SUMMARY_MODEL; + const maxContextTokens = getModelMaxTokens(model) ?? 
4095;
+
     // 3 tokens for the assistant label, and 98 for the summarizer prompt (101)
     let promptBuffer = 101;

@@ -646,7 +697,7 @@ ${convo}
     logger.debug('[OpenAIClient] initialPromptTokens', initialPromptTokens);

     const llm = this.initializeLLM({
-      model: OPENAI_SUMMARY_MODEL,
+      model,
       temperature: 0.2,
       context: 'summary',
       tokenBuffer: initialPromptTokens,
diff --git a/api/server/services/Endpoints/custom/initializeClient.js b/api/server/services/Endpoints/custom/initializeClient.js
index 10950d47505..390d8d7a880 100644
--- a/api/server/services/Endpoints/custom/initializeClient.js
+++ b/api/server/services/Endpoints/custom/initializeClient.js
@@ -19,15 +19,22 @@ const initializeClient = async ({ req, res, endpointOption }) => {
   const CUSTOM_API_KEY = endpointConfig.apiKey;
   const CUSTOM_BASE_URL = endpointConfig.baseURL;

-  const contextStrategy = endpointConfig.summarize ? 'summarize' : null;
+  const customOptions = {
+    titleConvo: endpointConfig.titleConvo,
+    titleModel: endpointConfig.titleModel,
+    forcePrompt: endpointConfig.forcePrompt,
+    summaryModel: endpointConfig.summaryModel,
+    defaultModelLabel: endpointConfig.defaultModelLabel,
+    titleMethod: endpointConfig.titleMethod ?? 'completion',
+    contextStrategy: endpointConfig.summarize ? 'summarize' : null,
+  };

   const clientOptions = {
-    contextStrategy,
     reverseProxyUrl: CUSTOM_BASE_URL ?? null,
     proxy: PROXY ?? null,
     req,
     res,
-    defaultModelLabel: endpointConfig.defaultModelLabel,
+    ...customOptions,
     ...endpointOption,
   };
diff --git a/api/server/services/Endpoints/openAI/addTitle.js b/api/server/services/Endpoints/openAI/addTitle.js
index f630638643f..ab15443f942 100644
--- a/api/server/services/Endpoints/openAI/addTitle.js
+++ b/api/server/services/Endpoints/openAI/addTitle.js
@@ -7,6 +7,10 @@ const addTitle = async (req, { text, response, client }) => {
     return;
   }

+  if (client.options.titleConvo === false) {
+    return;
+  }
+
   // If the request was aborted and is not azure, don't generate the title.
   if (!client.azure && client.abortController.signal.aborted) {
     return;
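With these options, each custom endpoint entry in the config can steer the auxiliary model calls without environment variables: `titleConvo` gates titling entirely, `titleModel` and `summaryModel` override which models are used, `titleMethod` chooses between the LangChain `functions` chain and a plain `completion` request (the default for custom endpoints), `summarize` flips the context strategy, and `forcePrompt` forces the legacy prompt-style payload. A condensed sketch of the title-method selection implemented above (the helper names are stand-ins, not the real private methods):

// Stand-ins for the real OpenAIClient internals:
declare function titleChatCompletion(model: string): Promise<string>;
declare function runFunctionsTitleChain(model: string): Promise<string>;

async function generateTitle(
  options: { titleMethod?: 'completion' | 'functions'; titleModel?: string },
  env: { OPENAI_TITLE_MODEL?: string },
): Promise<string> {
  // Model fallback order mirrors titleConvo: config value, then env, then default.
  const model = options.titleModel ?? env.OPENAI_TITLE_MODEL ?? 'gpt-3.5-turbo';

  if (options.titleMethod === 'completion') {
    return titleChatCompletion(model); // skip LangChain entirely
  }
  try {
    return await runFunctionsTitleChain(model); // try the `functions` method first
  } catch {
    return titleChatCompletion(model); // fall back to a plain completion request
  }
}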
From e90e83548d2f040fbc406db12622814efa2728b9 Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Tue, 2 Jan 2024 09:04:21 -0500
Subject: [PATCH 35/59] refactor: rename `defaultModelLabel` to `modelDisplayLabel`

---
 api/app/clients/OpenAIClient.js                           | 2 +-
 api/server/controllers/AskController.js                   | 4 ++--
 api/server/controllers/EditController.js                  | 4 ++--
 api/server/services/Config/loadConfigEndpoints.js         | 4 ++--
 api/server/services/Endpoints/custom/initializeClient.js  | 2 +-
 client/src/hooks/Conversations/useGetSender.ts            | 4 ++--
 client/src/hooks/useChatHelpers.ts                        | 4 ++--
 packages/data-provider/src/schemas.ts                     | 8 ++++----
 packages/data-provider/src/types.ts                       | 2 +-
 9 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/api/app/clients/OpenAIClient.js b/api/app/clients/OpenAIClient.js
index dee80cef33e..a6ac2e316ef 100644
--- a/api/app/clients/OpenAIClient.js
+++ b/api/app/clients/OpenAIClient.js
@@ -162,7 +162,7 @@ class OpenAIClient extends BaseClient {
         endpoint: this.options.endpoint,
         endpointType: this.options.endpointType,
         chatGptLabel: this.options.chatGptLabel,
-        defaultModelLabel: this.options.defaultModelLabel,
+        modelDisplayLabel: this.options.modelDisplayLabel,
       });

     this.userLabel = this.options.userLabel || 'User';
diff --git a/api/server/controllers/AskController.js b/api/server/controllers/AskController.js
index ebdfcc64436..6dc3949966a 100644
--- a/api/server/controllers/AskController.js
+++ b/api/server/controllers/AskController.js
@@ -9,7 +9,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
     text,
     endpointOption,
     conversationId,
-    defaultModelLabel,
+    modelDisplayLabel,
     parentMessageId = null,
     overrideParentMessageId = null,
   } = req.body;
@@ -26,7 +26,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
   const sender = getResponseSender({
     ...endpointOption,
     model: endpointOption.modelOptions.model,
-    defaultModelLabel,
+    modelDisplayLabel,
   });
   const newConvo = !conversationId;
   const user = req.user.id;
diff --git a/api/server/controllers/EditController.js b/api/server/controllers/EditController.js
index 1cbba93ca75..43b82e7193f 100644
--- a/api/server/controllers/EditController.js
+++ b/api/server/controllers/EditController.js
@@ -10,7 +10,7 @@ const EditController = async (req, res, next, initializeClient) => {
     generation,
     endpointOption,
     conversationId,
-    defaultModelLabel,
+    modelDisplayLabel,
     responseMessageId,
     isContinued = false,
     parentMessageId = null,
@@ -33,7 +33,7 @@ const EditController = async (req, res, next, initializeClient) => {
   const sender = getResponseSender({
     ...endpointOption,
     model: endpointOption.modelOptions.model,
-    defaultModelLabel,
+    modelDisplayLabel,
   });
   const userMessageId = parentMessageId;
   const user = req.user.id;
diff --git a/api/server/services/Config/loadConfigEndpoints.js b/api/server/services/Config/loadConfigEndpoints.js
index b31addb3b5a..8ec0689b477 100644
--- a/api/server/services/Config/loadConfigEndpoints.js
+++ b/api/server/services/Config/loadConfigEndpoints.js
@@ -33,12 +33,12 @@ async function loadConfigEndpoints() {
     for (let i = 0; i < customEndpoints.length; i++) {
       const endpoint = customEndpoints[i];
-      const { baseURL, apiKey, name, defaultModelLabel } = endpoint;
+      const { baseURL, apiKey, name, modelDisplayLabel } = endpoint;
       endpointsConfig[name] = {
         type: EModelEndpoint.custom,
         userProvide: isUserProvided(apiKey),
         userProvideURL: isUserProvided(baseURL),
-        defaultModelLabel,
+
modelDisplayLabel, }; } } diff --git a/api/server/services/Endpoints/custom/initializeClient.js b/api/server/services/Endpoints/custom/initializeClient.js index 390d8d7a880..9fcb2915225 100644 --- a/api/server/services/Endpoints/custom/initializeClient.js +++ b/api/server/services/Endpoints/custom/initializeClient.js @@ -24,7 +24,7 @@ const initializeClient = async ({ req, res, endpointOption }) => { titleModel: endpointConfig.titleModel, forcePrompt: endpointConfig.forcePrompt, summaryModel: endpointConfig.summaryModel, - defaultModelLabel: endpointConfig.defaultModelLabel, + modelDisplayLabel: endpointConfig.modelDisplayLabel, titleMethod: endpointConfig.titleMethod ?? 'completion', contextStrategy: endpointConfig.summarize ? 'summarize' : null, }; diff --git a/client/src/hooks/Conversations/useGetSender.ts b/client/src/hooks/Conversations/useGetSender.ts index 4afacfa530b..0b8ed9ffea3 100644 --- a/client/src/hooks/Conversations/useGetSender.ts +++ b/client/src/hooks/Conversations/useGetSender.ts @@ -7,8 +7,8 @@ export default function useGetSender() { const { data: endpointsConfig = {} as TEndpointsConfig } = useGetEndpointsQuery(); return useCallback( (endpointOption: TEndpointOption) => { - const { defaultModelLabel } = endpointsConfig[endpointOption.endpoint ?? ''] ?? {}; - return getResponseSender({ ...endpointOption, defaultModelLabel }); + const { modelDisplayLabel } = endpointsConfig[endpointOption.endpoint ?? ''] ?? {}; + return getResponseSender({ ...endpointOption, modelDisplayLabel }); }, [endpointsConfig], ); diff --git a/client/src/hooks/useChatHelpers.ts b/client/src/hooks/useChatHelpers.ts index bffd4de8bb0..1cf3c16fea6 100644 --- a/client/src/hooks/useChatHelpers.ts +++ b/client/src/hooks/useChatHelpers.ts @@ -161,12 +161,12 @@ export default function useChatHelpers(index = 0, paramId: string | undefined) { conversation: conversation ?? {}, }); - const { defaultModelLabel } = endpointsConfig[endpoint ?? ''] ?? {}; + const { modelDisplayLabel } = endpointsConfig[endpoint ?? ''] ?? 
{}; const endpointOption = { ...convo, endpoint, endpointType, - defaultModelLabel, + modelDisplayLabel, key: getExpiry(), } as TEndpointOption; const responseSender = getSender({ model: conversation?.model, ...endpointOption }); diff --git a/packages/data-provider/src/schemas.ts b/packages/data-provider/src/schemas.ts index 5b26d7e3586..582dfb458c3 100644 --- a/packages/data-provider/src/schemas.ts +++ b/packages/data-provider/src/schemas.ts @@ -691,7 +691,7 @@ export const parseConvo = ({ export type TEndpointOption = { endpoint: EModelEndpoint; endpointType?: EModelEndpoint; - defaultModelLabel?: string; + modelDisplayLabel?: string; model?: string | null; promptPrefix?: string; temperature?: number; @@ -702,7 +702,7 @@ export type TEndpointOption = { }; export const getResponseSender = (endpointOption: TEndpointOption): string => { - const { model, endpoint, endpointType, defaultModelLabel, chatGptLabel, modelLabel, jailbreak } = + const { model, endpoint, endpointType, modelDisplayLabel, chatGptLabel, modelLabel, jailbreak } = endpointOption; if ( @@ -760,8 +760,8 @@ export const getResponseSender = (endpointOption: TEndpointOption): string => { return 'GPT-3.5'; } else if (model && model.includes('gpt-4')) { return 'GPT-4'; - } else if (defaultModelLabel) { - return defaultModelLabel; + } else if (modelDisplayLabel) { + return modelDisplayLabel; } return 'AI'; diff --git a/packages/data-provider/src/types.ts b/packages/data-provider/src/types.ts index 4693929deb1..0fc6781aef8 100644 --- a/packages/data-provider/src/types.ts +++ b/packages/data-provider/src/types.ts @@ -125,9 +125,9 @@ export type TConfig = { azure?: boolean; availableTools?: []; plugins?: Record; - defaultModelLabel?: string; name?: string; iconURL?: string; + modelDisplayLabel?: string; userProvide?: boolean | null; userProvideURL?: boolean | null; }; From 0a2bbb59f3b32851e6dc50d90ebd9cf8e10ca9c9 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Tue, 2 Jan 2024 09:46:07 -0500 Subject: [PATCH 36/59] refactor(data-provider): separate concerns from `schemas` into `parsers`, `config`, and fix imports elsewhere --- packages/data-provider/src/config.ts | 180 ++++++++ packages/data-provider/src/createPayload.ts | 4 +- packages/data-provider/src/index.ts | 7 +- packages/data-provider/src/parsers.ts | 225 ++++++++++ packages/data-provider/src/schemas.ts | 460 ++------------------ packages/data-provider/src/types.ts | 21 +- 6 files changed, 473 insertions(+), 424 deletions(-) create mode 100644 packages/data-provider/src/config.ts create mode 100644 packages/data-provider/src/parsers.ts diff --git a/packages/data-provider/src/config.ts b/packages/data-provider/src/config.ts new file mode 100644 index 00000000000..ae5144edf39 --- /dev/null +++ b/packages/data-provider/src/config.ts @@ -0,0 +1,180 @@ +import { z } from 'zod'; +import { EModelEndpoint } from './schemas'; + +export const endpointSchema = z.object({ + name: z.string(), + apiKey: z.string(), + baseURL: z.string(), + models: z.object({ + default: z.array(z.string()).min(1), + fetch: z.boolean().optional(), + }), + titleConvo: z.boolean().optional(), + titleMethod: z.union([z.literal('completion'), z.literal('functions')]).optional(), + titleModel: z.string().optional(), + summarize: z.boolean().optional(), + summaryModel: z.string().optional(), + forcePrompt: z.boolean().optional(), + modelDisplayLabel: z.string().optional(), +}); + +export const configSchema = z.object({ + version: z.string(), + cache: z.boolean(), + endpoints: z.object({ + custom: 
z.array(endpointSchema.partial()),
+  }),
+});
+
+export enum KnownEndpoints {
+  mistral = 'mistral',
+  openrouter = 'openrouter',
+}
+
+export const defaultEndpoints: EModelEndpoint[] = [
+  EModelEndpoint.openAI,
+  EModelEndpoint.assistant,
+  EModelEndpoint.azureOpenAI,
+  EModelEndpoint.bingAI,
+  EModelEndpoint.chatGPTBrowser,
+  EModelEndpoint.gptPlugins,
+  EModelEndpoint.google,
+  EModelEndpoint.anthropic,
+  EModelEndpoint.custom,
+];
+
+export const alternateName = {
+  [EModelEndpoint.openAI]: 'OpenAI',
+  [EModelEndpoint.assistant]: 'Assistants',
+  [EModelEndpoint.azureOpenAI]: 'Azure OpenAI',
+  [EModelEndpoint.bingAI]: 'Bing',
+  [EModelEndpoint.chatGPTBrowser]: 'ChatGPT',
+  [EModelEndpoint.gptPlugins]: 'Plugins',
+  [EModelEndpoint.google]: 'Google',
+  [EModelEndpoint.anthropic]: 'Anthropic',
+  [EModelEndpoint.custom]: 'Custom',
+};
+
+export const defaultModels = {
+  [EModelEndpoint.google]: [
+    'gemini-pro',
+    'gemini-pro-vision',
+    'chat-bison',
+    'chat-bison-32k',
+    'codechat-bison',
+    'codechat-bison-32k',
+    'text-bison',
+    'text-bison-32k',
+    'text-unicorn',
+    'code-gecko',
+    'code-bison',
+    'code-bison-32k',
+  ],
+  [EModelEndpoint.anthropic]: [
+    'claude-2.1',
+    'claude-2',
+    'claude-1.2',
+    'claude-1',
+    'claude-1-100k',
+    'claude-instant-1',
+    'claude-instant-1-100k',
+  ],
+  [EModelEndpoint.openAI]: [
+    'gpt-3.5-turbo-16k-0613',
+    'gpt-3.5-turbo-16k',
+    'gpt-4-1106-preview',
+    'gpt-3.5-turbo',
+    'gpt-3.5-turbo-1106',
+    'gpt-4-vision-preview',
+    'gpt-4',
+    'gpt-3.5-turbo-instruct-0914',
+    'gpt-3.5-turbo-0613',
+    'gpt-3.5-turbo-0301',
+    'gpt-3.5-turbo-instruct',
+    'gpt-4-0613',
+    'text-davinci-003',
+    'gpt-4-0314',
+  ],
+};
+
+export const EndpointURLs: { [key in EModelEndpoint]: string } = {
+  [EModelEndpoint.openAI]: `/api/ask/${EModelEndpoint.openAI}`,
+  [EModelEndpoint.bingAI]: `/api/ask/${EModelEndpoint.bingAI}`,
+  [EModelEndpoint.google]: `/api/ask/${EModelEndpoint.google}`,
+  [EModelEndpoint.custom]: `/api/ask/${EModelEndpoint.custom}`,
+  [EModelEndpoint.anthropic]: `/api/ask/${EModelEndpoint.anthropic}`,
+  [EModelEndpoint.gptPlugins]: `/api/ask/${EModelEndpoint.gptPlugins}`,
+  [EModelEndpoint.azureOpenAI]: `/api/ask/${EModelEndpoint.azureOpenAI}`,
+  [EModelEndpoint.chatGPTBrowser]: `/api/ask/${EModelEndpoint.chatGPTBrowser}`,
+  [EModelEndpoint.assistant]: '/api/assistants/chat',
+};
+
+export const modularEndpoints = new Set([
+  EModelEndpoint.gptPlugins,
+  EModelEndpoint.anthropic,
+  EModelEndpoint.google,
+  EModelEndpoint.openAI,
+  EModelEndpoint.azureOpenAI,
+  EModelEndpoint.custom,
+]);
+
+export const supportsFiles = {
+  [EModelEndpoint.openAI]: true,
+  [EModelEndpoint.google]: true,
+  [EModelEndpoint.assistant]: true,
+  [EModelEndpoint.azureOpenAI]: true,
+  [EModelEndpoint.custom]: true,
+};
+
+export const supportsBalanceCheck = {
+  [EModelEndpoint.openAI]: true,
+  [EModelEndpoint.azureOpenAI]: true,
+  [EModelEndpoint.gptPlugins]: true,
+  [EModelEndpoint.custom]: true,
+};
+
+export const visionModels = ['gpt-4-vision', 'llava-13b', 'gemini-pro-vision'];
+
+/**
+ * Enum for cache keys.
+ */
+export enum CacheKeys {
+  /**
+   * Key for the config store namespace.
+   */
+  CONFIG_STORE = 'configStore',
+  /**
+   * Key for the plugins cache.
+   */
+  PLUGINS = 'plugins',
+  /**
+   * Key for the model config cache.
+   */
+  MODELS_CONFIG = 'modelsConfig',
+  /**
+   * Key for the default endpoint config cache.
+   */
+  ENDPOINT_CONFIG = 'endpointsConfig',
+  /**
+   * Key for the custom config cache.
+   */
+  CUSTOM_CONFIG = 'customConfig',
+  /**
+   * Key for the override config cache.
+   */
+  OVERRIDE_CONFIG = 'overrideConfig',
+}
+
+/**
+ * Enum for authentication keys.
+ */
+export enum AuthKeys {
+  /**
+   * Key for the Service Account to use Vertex AI.
+   */
+  GOOGLE_SERVICE_KEY = 'GOOGLE_SERVICE_KEY',
+  /**
+   * API key to use Google Generative AI.
+   */
+  GOOGLE_API_KEY = 'GOOGLE_API_KEY',
+}
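`endpointSchema` gives config-defined endpoints a typed contract: every entry must name the endpoint, provide `apiKey` and `baseURL` (either real values or the literal 'user_provided'), and declare at least one default model, while the titling and summary options remain optional. A hedged validation sketch (all values invented for illustration):

import { endpointSchema } from 'librechat-data-provider';

const entry = endpointSchema.parse({
  name: 'Mistral', // becomes the `endpoint` name shown in the UI
  apiKey: 'user_provided', // or an actual key
  baseURL: 'https://api.mistral.ai/v1', // assumed OpenAI-compatible base URL
  models: { default: ['mistral-tiny'], fetch: false },
  titleConvo: true,
  titleMethod: 'completion',
  modelDisplayLabel: 'Mistral',
});
// Throws a ZodError if, e.g., `models.default` is empty (see the .min(1) above).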
+   */
+  OVERRIDE_CONFIG = 'overrideConfig',
+}
+
+/**
+ * Enum for authentication keys.
+ */
+export enum AuthKeys {
+  /**
+   * Key for the Service Account to use Vertex AI.
+   */
+  GOOGLE_SERVICE_KEY = 'GOOGLE_SERVICE_KEY',
+  /**
+   * API key to use Google Generative AI.
+   */
+  GOOGLE_API_KEY = 'GOOGLE_API_KEY',
+}
diff --git a/packages/data-provider/src/createPayload.ts b/packages/data-provider/src/createPayload.ts
index e7a2fc46244..998b5774b5f 100644
--- a/packages/data-provider/src/createPayload.ts
+++ b/packages/data-provider/src/createPayload.ts
@@ -1,6 +1,6 @@
-import { tConvoUpdateSchema } from './schemas';
 import type { TSubmission, TMessage, TEndpointOption } from './types';
-import { EModelEndpoint, EndpointURLs } from './types';
+import { tConvoUpdateSchema, EModelEndpoint } from './schemas';
+import { EndpointURLs } from './config';
 
 export default function createPayload(submission: TSubmission) {
   const { conversation, message, messages, endpointOption, isEdited, isContinued } = submission;
diff --git a/packages/data-provider/src/index.ts b/packages/data-provider/src/index.ts
index 4b45891dc92..318829adafa 100644
--- a/packages/data-provider/src/index.ts
+++ b/packages/data-provider/src/index.ts
@@ -1,8 +1,13 @@
-/* types/schemas/schema helpers */
+/* config */
+export * from './config';
+/* schema helpers */
+export * from './parsers';
+/* types (also exports schemas from `./types`, as they contain definitions needed elsewhere) */
 export * from './types';
 export * from './types/assistants';
 export * from './types/files';
 export * from './types/mutations';
+/* query/mutation keys */
 export * from './keys';
 /* api call helpers */
 export * from './headers-helpers';
diff --git a/packages/data-provider/src/parsers.ts b/packages/data-provider/src/parsers.ts
new file mode 100644
index 00000000000..89872b1b0a4
--- /dev/null
+++ b/packages/data-provider/src/parsers.ts
@@ -0,0 +1,225 @@
+import type { TConversation, TPreset } from './schemas';
+import type { TEndpointOption } from './types';
+import {
+  EModelEndpoint,
+  openAISchema,
+  googleSchema,
+  bingAISchema,
+  anthropicSchema,
+  chatGPTBrowserSchema,
+  gptPluginsSchema,
+  assistantSchema,
+  compactOpenAISchema,
+  compactGoogleSchema,
+  compactAnthropicSchema,
+  compactChatGPTSchema,
+  compactPluginsSchema,
+} from './schemas';
+import { alternateName } from './config';
+
+type EndpointSchema =
+  | typeof openAISchema
+  | typeof googleSchema
+  | typeof bingAISchema
+  | typeof anthropicSchema
+  | typeof chatGPTBrowserSchema
+  | typeof gptPluginsSchema
+  | typeof assistantSchema;
+
+const endpointSchemas: Record<EModelEndpoint, EndpointSchema> = {
+  [EModelEndpoint.openAI]: openAISchema,
+  [EModelEndpoint.azureOpenAI]: openAISchema,
+  [EModelEndpoint.custom]: openAISchema,
+  [EModelEndpoint.google]: googleSchema,
+  [EModelEndpoint.bingAI]: bingAISchema,
+  [EModelEndpoint.anthropic]: anthropicSchema,
+  [EModelEndpoint.chatGPTBrowser]: chatGPTBrowserSchema,
+  [EModelEndpoint.gptPlugins]: gptPluginsSchema,
+  [EModelEndpoint.assistant]: assistantSchema,
+};
+
+// const schemaCreators: Record<EModelEndpoint, (customSchema: DefaultSchemaValues) => EndpointSchema> = {
+//   [EModelEndpoint.google]: createGoogleSchema,
+// };
+
+export function getFirstDefinedValue(possibleValues: string[]) {
+  let returnValue;
+  for (const value of possibleValues) {
+    if (value) {
+      returnValue = value;
+      break;
+    }
+  }
+  return returnValue;
+}
+
+export type TPossibleValues = {
+  models: string[];
+  secondaryModels?: string[];
+};
+
+export const parseConvo = ({
+  endpoint,
+  endpointType,
+  conversation,
+  possibleValues,
+}: {
+  endpoint: EModelEndpoint;
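+  // Usage sketch (hypothetical values, not from the codebase): parseConvo picks
+  // the zod schema registered for `endpoint` and validates a partial conversation:
+  //   const convo = parseConvo({
+  //     endpoint: EModelEndpoint.openAI,
+  //     conversation: { model: 'gpt-4', temperature: 0.7 },
+  //     possibleValues: { models: ['gpt-4', 'gpt-3.5-turbo'] },
+  //   });
+  // If `endpoint` has no registered schema (e.g. a named custom endpoint), the
+  // `endpointType` below selects the fallback schema instead.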
+  endpointType?: EModelEndpoint;
+  conversation: Partial<TConversation | TPreset>;
+  possibleValues?: TPossibleValues;
+  // TODO: POC for default schema
+  // defaultSchema?: Partial<DefaultSchemaValues>,
+}) => {
+  let schema = endpointSchemas[endpoint];
+
+  if (!schema && !endpointType) {
+    throw new Error(`Unknown endpoint: ${endpoint}`);
+  } else if (!schema && endpointType) {
+    schema = endpointSchemas[endpointType];
+  }
+
+  // if (defaultSchema && schemaCreators[endpoint]) {
+  //   schema = schemaCreators[endpoint](defaultSchema);
+  // }
+
+  const convo = schema.parse(conversation) as TConversation;
+  const { models, secondaryModels } = possibleValues ?? {};
+
+  if (models && convo) {
+    convo.model = getFirstDefinedValue(models) ?? convo.model;
+  }
+
+  if (secondaryModels && convo.agentOptions) {
+    convo.agentOptions.model = getFirstDefinedValue(secondaryModels) ?? convo.agentOptions.model;
+  }
+
+  return convo;
+};
+
+export const getResponseSender = (endpointOption: TEndpointOption): string => {
+  const { model, endpoint, endpointType, modelDisplayLabel, chatGptLabel, modelLabel, jailbreak } =
+    endpointOption;
+
+  if (
+    [
+      EModelEndpoint.openAI,
+      EModelEndpoint.azureOpenAI,
+      EModelEndpoint.gptPlugins,
+      EModelEndpoint.chatGPTBrowser,
+    ].includes(endpoint)
+  ) {
+    if (chatGptLabel) {
+      return chatGptLabel;
+    } else if (model && model.includes('gpt-3')) {
+      return 'GPT-3.5';
+    } else if (model && model.includes('gpt-4')) {
+      return 'GPT-4';
+    } else if (model && model.includes('mistral')) {
+      return 'Mistral';
+    }
+    return alternateName[endpoint] ?? 'ChatGPT';
+  }
+
+  if (endpoint === EModelEndpoint.bingAI) {
+    return jailbreak ? 'Sydney' : 'BingAI';
+  }
+
+  if (endpoint === EModelEndpoint.anthropic) {
+    return modelLabel ?? 'Claude';
+  }
+
+  if (endpoint === EModelEndpoint.google) {
+    if (modelLabel) {
+      return modelLabel;
+    } else if (model && model.includes('gemini')) {
+      return 'Gemini';
+    } else if (model && model.includes('code')) {
+      return 'Codey';
+    }
+
+    return 'PaLM2';
+  }
+
+  if (endpoint === EModelEndpoint.custom || endpointType === EModelEndpoint.custom) {
+    if (modelLabel) {
+      return modelLabel;
+    } else if (chatGptLabel) {
+      return chatGptLabel;
+    } else if (model && model.includes('mistral')) {
+      return 'Mistral';
+    } else if (model && model.includes('gpt-3')) {
+      return 'GPT-3.5';
+    } else if (model && model.includes('gpt-4')) {
+      return 'GPT-4';
+    } else if (modelDisplayLabel) {
+      return modelDisplayLabel;
+    }
+
+    return 'AI';
+  }
+
+  return '';
+};
+
+type CompactEndpointSchema =
+  | typeof compactOpenAISchema
+  | typeof assistantSchema
+  | typeof compactGoogleSchema
+  | typeof bingAISchema
+  | typeof compactAnthropicSchema
+  | typeof compactChatGPTSchema
+  | typeof compactPluginsSchema;
+
+const compactEndpointSchemas: Record<string, CompactEndpointSchema> = {
+  openAI: compactOpenAISchema,
+  azureOpenAI: compactOpenAISchema,
+  custom: compactOpenAISchema,
+  assistant: assistantSchema,
+  google: compactGoogleSchema,
+  /* BingAI needs all fields */
+  bingAI: bingAISchema,
+  anthropic: compactAnthropicSchema,
+  chatGPTBrowser: compactChatGPTSchema,
+  gptPlugins: compactPluginsSchema,
+};
+
+export const parseCompactConvo = ({
+  endpoint,
+  endpointType,
+  conversation,
+  possibleValues,
+}: {
+  endpoint?: EModelEndpoint;
+  endpointType?: EModelEndpoint;
+  conversation: Partial<TConversation | TPreset>;
+  possibleValues?: TPossibleValues;
+  // TODO: POC for default schema
+  // defaultSchema?: Partial<DefaultSchemaValues>,
+}) => {
+  if (!endpoint) {
+    throw new Error(`undefined endpoint: ${endpoint}`);
+  }
+
+  let schema = compactEndpointSchemas[endpoint];
+
+  if (!schema &&
!endpointType) { + throw new Error(`Unknown endpoint: ${endpoint}`); + } else if (!schema && endpointType) { + schema = compactEndpointSchemas[endpointType]; + } + + const convo = schema.parse(conversation) as TConversation; + // const { models, secondaryModels } = possibleValues ?? {}; + const { models } = possibleValues ?? {}; + + if (models && convo) { + convo.model = getFirstDefinedValue(models) ?? convo.model; + } + + // if (secondaryModels && convo.agentOptions) { + // convo.agentOptionmodel = getFirstDefinedValue(secondaryModels) ?? convo.agentOptionmodel; + // } + + return convo; +}; diff --git a/packages/data-provider/src/schemas.ts b/packages/data-provider/src/schemas.ts index 582dfb458c3..fb4979812ce 100644 --- a/packages/data-provider/src/schemas.ts +++ b/packages/data-provider/src/schemas.ts @@ -12,82 +12,6 @@ export enum EModelEndpoint { custom = 'custom', } -export enum KnownEndpoints { - mistral = 'mistral', - openrouter = 'openrouter', -} - -export const defaultEndpoints: EModelEndpoint[] = [ - EModelEndpoint.openAI, - EModelEndpoint.assistant, - EModelEndpoint.azureOpenAI, - EModelEndpoint.bingAI, - EModelEndpoint.chatGPTBrowser, - EModelEndpoint.gptPlugins, - EModelEndpoint.google, - EModelEndpoint.anthropic, - EModelEndpoint.custom, -]; - -export const defaultModels = { - [EModelEndpoint.google]: [ - 'gemini-pro', - 'gemini-pro-vision', - 'chat-bison', - 'chat-bison-32k', - 'codechat-bison', - 'codechat-bison-32k', - 'text-bison', - 'text-bison-32k', - 'text-unicorn', - 'code-gecko', - 'code-bison', - 'code-bison-32k', - ], - [EModelEndpoint.anthropic]: [ - 'claude-2.1', - 'claude-2', - 'claude-1.2', - 'claude-1', - 'claude-1-100k', - 'claude-instant-1', - 'claude-instant-1-100k', - ], - [EModelEndpoint.openAI]: [ - 'gpt-3.5-turbo-16k-0613', - 'gpt-3.5-turbo-16k', - 'gpt-4-1106-preview', - 'gpt-3.5-turbo', - 'gpt-3.5-turbo-1106', - 'gpt-4-vision-preview', - 'gpt-4', - 'gpt-3.5-turbo-instruct-0914', - 'gpt-3.5-turbo-0613', - 'gpt-3.5-turbo-0301', - 'gpt-3.5-turbo-instruct', - 'gpt-4-0613', - 'text-davinci-003', - 'gpt-4-0314', - ], -}; - -export const alternateName = { - [EModelEndpoint.openAI]: 'OpenAI', - [EModelEndpoint.assistant]: 'Assistants', - [EModelEndpoint.azureOpenAI]: 'Azure OpenAI', - [EModelEndpoint.bingAI]: 'Bing', - [EModelEndpoint.chatGPTBrowser]: 'ChatGPT', - [EModelEndpoint.gptPlugins]: 'Plugins', - [EModelEndpoint.google]: 'Google', - [EModelEndpoint.anthropic]: 'Anthropic', - [EModelEndpoint.custom]: 'Custom', -}; - -export enum AuthKeys { - GOOGLE_SERVICE_KEY = 'GOOGLE_SERVICE_KEY', - GOOGLE_API_KEY = 'GOOGLE_API_KEY', -} - export const endpointSettings = { [EModelEndpoint.google]: { model: { @@ -124,44 +48,6 @@ export const endpointSettings = { const google = endpointSettings[EModelEndpoint.google]; -export const EndpointURLs: { [key in EModelEndpoint]: string } = { - [EModelEndpoint.openAI]: `/api/ask/${EModelEndpoint.openAI}`, - [EModelEndpoint.bingAI]: `/api/ask/${EModelEndpoint.bingAI}`, - [EModelEndpoint.google]: `/api/ask/${EModelEndpoint.google}`, - [EModelEndpoint.custom]: `/api/ask/${EModelEndpoint.custom}`, - [EModelEndpoint.anthropic]: `/api/ask/${EModelEndpoint.anthropic}`, - [EModelEndpoint.gptPlugins]: `/api/ask/${EModelEndpoint.gptPlugins}`, - [EModelEndpoint.azureOpenAI]: `/api/ask/${EModelEndpoint.azureOpenAI}`, - [EModelEndpoint.chatGPTBrowser]: `/api/ask/${EModelEndpoint.chatGPTBrowser}`, - [EModelEndpoint.assistant]: '/api/assistants/chat', -}; - -export const modularEndpoints = new Set([ - EModelEndpoint.gptPlugins, - 
EModelEndpoint.anthropic, - EModelEndpoint.google, - EModelEndpoint.openAI, - EModelEndpoint.azureOpenAI, - EModelEndpoint.custom, -]); - -export const supportsFiles = { - [EModelEndpoint.openAI]: true, - [EModelEndpoint.google]: true, - [EModelEndpoint.assistant]: true, - [EModelEndpoint.azureOpenAI]: true, - [EModelEndpoint.custom]: true, -}; - -export const supportsBalanceCheck = { - [EModelEndpoint.openAI]: true, - [EModelEndpoint.azureOpenAI]: true, - [EModelEndpoint.gptPlugins]: true, - [EModelEndpoint.custom]: true, -}; - -export const visionModels = ['gpt-4-vision', 'llava-13b', 'gemini-pro-vision']; - export const eModelEndpointSchema = z.nativeEnum(EModelEndpoint); export const extendedModelEndpointSchema = z.union([eModelEndpointSchema, z.string()]); @@ -414,56 +300,6 @@ export const googleSchema = tConversationSchema topK: google.topK.default, })); -// const createGoogleSchema = (customGoogle: DefaultSchemaValues) => { -// const defaults = { ...google, ...customGoogle }; -// return tConversationSchema -// .pick({ -// model: true, -// modelLabel: true, -// promptPrefix: true, -// examples: true, -// temperature: true, -// maxOutputTokens: true, -// topP: true, -// topK: true, -// }) -// .transform((obj) => { -// const isGeminiPro = obj?.model?.toLowerCase()?.includes('gemini-pro'); - -// const maxOutputTokensMax = isGeminiPro -// ? defaults.maxOutputTokens.maxGeminiPro -// : defaults.maxOutputTokens.max; -// const maxOutputTokensDefault = isGeminiPro -// ? defaults.maxOutputTokens.defaultGeminiPro -// : defaults.maxOutputTokens.default; - -// let maxOutputTokens = obj.maxOutputTokens ?? maxOutputTokensDefault; -// maxOutputTokens = Math.min(maxOutputTokens, maxOutputTokensMax); - -// return { -// ...obj, -// model: obj.model ?? defaults.model.default, -// modelLabel: obj.modelLabel ?? null, -// promptPrefix: obj.promptPrefix ?? null, -// examples: obj.examples ?? [{ input: { content: '' }, output: { content: '' } }], -// temperature: obj.temperature ?? defaults.temperature.default, -// maxOutputTokens, -// topP: obj.topP ?? defaults.topP.default, -// topK: obj.topK ?? 
defaults.topK.default, -// }; -// }) -// .catch(() => ({ -// model: defaults.model.default, -// modelLabel: null, -// promptPrefix: null, -// examples: [{ input: { content: '' }, output: { content: '' } }], -// temperature: defaults.temperature.default, -// maxOutputTokens: defaults.maxOutputTokens.default, -// topP: defaults.topP.default, -// topK: defaults.topK.default, -// })); -// }; - export const bingAISchema = tConversationSchema .pick({ jailbreak: true, @@ -608,168 +444,6 @@ export const assistantSchema = tConversationSchema .transform(removeNullishValues) .catch(() => ({})); -type EndpointSchema = - | typeof openAISchema - | typeof googleSchema - | typeof bingAISchema - | typeof anthropicSchema - | typeof chatGPTBrowserSchema - | typeof gptPluginsSchema - | typeof assistantSchema; - -const endpointSchemas: Record = { - [EModelEndpoint.openAI]: openAISchema, - [EModelEndpoint.azureOpenAI]: openAISchema, - [EModelEndpoint.custom]: openAISchema, - [EModelEndpoint.google]: googleSchema, - [EModelEndpoint.bingAI]: bingAISchema, - [EModelEndpoint.anthropic]: anthropicSchema, - [EModelEndpoint.chatGPTBrowser]: chatGPTBrowserSchema, - [EModelEndpoint.gptPlugins]: gptPluginsSchema, - [EModelEndpoint.assistant]: assistantSchema, -}; - -// const schemaCreators: Record EndpointSchema> = { -// [EModelEndpoint.google]: createGoogleSchema, -// }; - -export function getFirstDefinedValue(possibleValues: string[]) { - let returnValue; - for (const value of possibleValues) { - if (value) { - returnValue = value; - break; - } - } - return returnValue; -} - -export type TPossibleValues = { - models: string[]; - secondaryModels?: string[]; -}; - -export const parseConvo = ({ - endpoint, - endpointType, - conversation, - possibleValues, -}: { - endpoint: EModelEndpoint; - endpointType?: EModelEndpoint; - conversation: Partial; - possibleValues?: TPossibleValues; - // TODO: POC for default schema - // defaultSchema?: Partial, -}) => { - let schema = endpointSchemas[endpoint]; - - if (!schema && !endpointType) { - throw new Error(`Unknown endpoint: ${endpoint}`); - } else if (!schema && endpointType) { - schema = endpointSchemas[endpointType]; - } - - // if (defaultSchema && schemaCreators[endpoint]) { - // schema = schemaCreators[endpoint](defaultSchema); - // } - - const convo = schema.parse(conversation) as TConversation; - const { models, secondaryModels } = possibleValues ?? {}; - - if (models && convo) { - convo.model = getFirstDefinedValue(models) ?? convo.model; - } - - if (secondaryModels && convo.agentOptions) { - convo.agentOptions.model = getFirstDefinedValue(secondaryModels) ?? 
convo.agentOptions.model; - } - - return convo; -}; - -export type TEndpointOption = { - endpoint: EModelEndpoint; - endpointType?: EModelEndpoint; - modelDisplayLabel?: string; - model?: string | null; - promptPrefix?: string; - temperature?: number; - chatGptLabel?: string | null; - modelLabel?: string | null; - jailbreak?: boolean; - key?: string | null; -}; - -export const getResponseSender = (endpointOption: TEndpointOption): string => { - const { model, endpoint, endpointType, modelDisplayLabel, chatGptLabel, modelLabel, jailbreak } = - endpointOption; - - if ( - [ - EModelEndpoint.openAI, - EModelEndpoint.azureOpenAI, - EModelEndpoint.gptPlugins, - EModelEndpoint.chatGPTBrowser, - ].includes(endpoint) - ) { - if (chatGptLabel) { - return chatGptLabel; - } else if (model && model.includes('gpt-3')) { - return 'GPT-3.5'; - } else if (model && model.includes('gpt-4')) { - return 'GPT-4'; - } else if (model && model.includes('mistral')) { - return 'Mistral'; - } - return alternateName[endpoint] ?? 'ChatGPT'; - } - - if (endpoint === EModelEndpoint.bingAI) { - return jailbreak ? 'Sydney' : 'BingAI'; - } - - if (endpoint === EModelEndpoint.anthropic) { - return modelLabel ?? 'Claude'; - } - - if (endpoint === EModelEndpoint.google) { - if (modelLabel) { - return modelLabel; - } else if (model && model.includes('gemini')) { - return 'Gemini'; - } else if (model && model.includes('code')) { - return 'Codey'; - } - - return 'PaLM2'; - } - - if ( - endpoint === EModelEndpoint.custom || - endpointType === EModelEndpoint.custom || - !defaultEndpoints.includes(endpoint) - ) { - if (modelLabel) { - return modelLabel; - } else if (chatGptLabel) { - return chatGptLabel; - } else if (model && model.includes('mistral')) { - return 'Mistral'; - } else if (model && model.includes('gpt-3')) { - return 'GPT-3.5'; - } else if (model && model.includes('gpt-4')) { - return 'GPT-4'; - } else if (modelDisplayLabel) { - return modelDisplayLabel; - } - - return 'AI'; - } - - return ''; -}; - export const compactOpenAISchema = tConversationSchema .pick({ model: true, @@ -935,94 +609,52 @@ export const compactPluginsSchema = tConversationSchema }) .catch(() => ({})); -type CompactEndpointSchema = - | typeof compactOpenAISchema - | typeof assistantSchema - | typeof compactGoogleSchema - | typeof bingAISchema - | typeof compactAnthropicSchema - | typeof compactChatGPTSchema - | typeof compactPluginsSchema; - -const compactEndpointSchemas: Record = { - openAI: compactOpenAISchema, - azureOpenAI: compactOpenAISchema, - custom: compactOpenAISchema, - assistant: assistantSchema, - google: compactGoogleSchema, - /* BingAI needs all fields */ - bingAI: bingAISchema, - anthropic: compactAnthropicSchema, - chatGPTBrowser: compactChatGPTSchema, - gptPlugins: compactPluginsSchema, -}; - -export const parseCompactConvo = ({ - endpoint, - endpointType, - conversation, - possibleValues, -}: { - endpoint?: EModelEndpoint; - endpointType?: EModelEndpoint; - conversation: Partial; - possibleValues?: TPossibleValues; - // TODO: POC for default schema - // defaultSchema?: Partial, -}) => { - if (!endpoint) { - throw new Error(`undefined endpoint: ${endpoint}`); - } - - let schema = compactEndpointSchemas[endpoint]; - - if (!schema && !endpointType) { - throw new Error(`Unknown endpoint: ${endpoint}`); - } else if (!schema && endpointType) { - schema = compactEndpointSchemas[endpointType]; - } - - const convo = schema.parse(conversation) as TConversation; - // const { models, secondaryModels } = possibleValues ?? 
{}; - const { models } = possibleValues ?? {}; - - if (models && convo) { - convo.model = getFirstDefinedValue(models) ?? convo.model; - } +// const createGoogleSchema = (customGoogle: DefaultSchemaValues) => { +// const defaults = { ...google, ...customGoogle }; +// return tConversationSchema +// .pick({ +// model: true, +// modelLabel: true, +// promptPrefix: true, +// examples: true, +// temperature: true, +// maxOutputTokens: true, +// topP: true, +// topK: true, +// }) +// .transform((obj) => { +// const isGeminiPro = obj?.model?.toLowerCase()?.includes('gemini-pro'); - // if (secondaryModels && convo.agentOptions) { - // convo.agentOptionmodel = getFirstDefinedValue(secondaryModels) ?? convo.agentOptionmodel; - // } +// const maxOutputTokensMax = isGeminiPro +// ? defaults.maxOutputTokens.maxGeminiPro +// : defaults.maxOutputTokens.max; +// const maxOutputTokensDefault = isGeminiPro +// ? defaults.maxOutputTokens.defaultGeminiPro +// : defaults.maxOutputTokens.default; - return convo; -}; +// let maxOutputTokens = obj.maxOutputTokens ?? maxOutputTokensDefault; +// maxOutputTokens = Math.min(maxOutputTokens, maxOutputTokensMax); -/** - * Enum for cache keys. - */ -export enum CacheKeys { - /** - * Key for the config store namespace. - */ - CONFIG_STORE = 'configStore', - /** - * Key for the plugins cache. - */ - PLUGINS = 'plugins', - /** - * Key for the model config cache. - */ - MODELS_CONFIG = 'modelsConfig', - /** - * Key for the default endpoint config cache. - */ - ENDPOINT_CONFIG = 'endpointsConfig', - /** - * Key for the custom config cache. - */ - CUSTOM_CONFIG = 'customConfig', - /** - * Key for the override config cache. - */ - OVERRIDE_CONFIG = 'overrideConfig', -} +// return { +// ...obj, +// model: obj.model ?? defaults.model.default, +// modelLabel: obj.modelLabel ?? null, +// promptPrefix: obj.promptPrefix ?? null, +// examples: obj.examples ?? [{ input: { content: '' }, output: { content: '' } }], +// temperature: obj.temperature ?? defaults.temperature.default, +// maxOutputTokens, +// topP: obj.topP ?? defaults.topP.default, +// topK: obj.topK ?? 
defaults.topK.default, +// }; +// }) +// .catch(() => ({ +// model: defaults.model.default, +// modelLabel: null, +// promptPrefix: null, +// examples: [{ input: { content: '' }, output: { content: '' } }], +// temperature: defaults.temperature.default, +// maxOutputTokens: defaults.maxOutputTokens.default, +// topP: defaults.topP.default, +// topK: defaults.topK.default, +// })); +// }; diff --git a/packages/data-provider/src/types.ts b/packages/data-provider/src/types.ts index 0fc6781aef8..0921cbbe97a 100644 --- a/packages/data-provider/src/types.ts +++ b/packages/data-provider/src/types.ts @@ -1,11 +1,5 @@ import OpenAI from 'openai'; -import type { - TResPlugin, - TMessage, - TConversation, - TEndpointOption, - EModelEndpoint, -} from './schemas'; +import type { TResPlugin, TMessage, TConversation, EModelEndpoint } from './schemas'; export type TOpenAIMessage = OpenAI.Chat.ChatCompletionMessageParam; export type TOpenAIFunction = OpenAI.Chat.ChatCompletionCreateParams.Function; @@ -17,6 +11,19 @@ export type TMessages = TMessage[]; export type TMessagesAtom = TMessages | null; +export type TEndpointOption = { + endpoint: EModelEndpoint; + endpointType?: EModelEndpoint; + modelDisplayLabel?: string; + model?: string | null; + promptPrefix?: string; + temperature?: number; + chatGptLabel?: string | null; + modelLabel?: string | null; + jailbreak?: boolean; + key?: string | null; +}; + export type TSubmission = { plugin?: TResPlugin; plugins?: TResPlugin[]; From df00c284106be614534ad248c2195530a2375687 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Tue, 2 Jan 2024 10:53:45 -0500 Subject: [PATCH 37/59] feat: `iconURL` and extract environment variables from custom endpoint config values --- .../services/Config/loadConfigEndpoints.js | 13 +++-- api/server/utils/handleText.js | 14 ++++++ api/server/utils/handleText.spec.js | 49 ++++++++++++++++++- 3 files changed, 71 insertions(+), 5 deletions(-) diff --git a/api/server/services/Config/loadConfigEndpoints.js b/api/server/services/Config/loadConfigEndpoints.js index 8ec0689b477..1b435e144e9 100644 --- a/api/server/services/Config/loadConfigEndpoints.js +++ b/api/server/services/Config/loadConfigEndpoints.js @@ -1,6 +1,6 @@ const { CacheKeys, EModelEndpoint } = require('librechat-data-provider'); +const { isUserProvided, extractEnvVariable } = require('~/server/utils'); const loadCustomConfig = require('./loadCustomConfig'); -const { isUserProvided } = require('~/server/utils'); const { getLogStores } = require('~/cache'); /** @@ -33,12 +33,17 @@ async function loadConfigEndpoints() { for (let i = 0; i < customEndpoints.length; i++) { const endpoint = customEndpoints[i]; - const { baseURL, apiKey, name, modelDisplayLabel } = endpoint; + const { baseURL, apiKey, name, iconURL, modelDisplayLabel } = endpoint; + + const resolvedApiKey = extractEnvVariable(apiKey); + const resolvedBaseURL = extractEnvVariable(baseURL); + endpointsConfig[name] = { type: EModelEndpoint.custom, - userProvide: isUserProvided(apiKey), - userProvideURL: isUserProvided(baseURL), + userProvide: isUserProvided(resolvedApiKey), + userProvideURL: isUserProvided(resolvedBaseURL), modelDisplayLabel, + iconURL, }; } } diff --git a/api/server/utils/handleText.js b/api/server/utils/handleText.js index a1eece0e6e6..b8d17106622 100644 --- a/api/server/utils/handleText.js +++ b/api/server/utils/handleText.js @@ -173,6 +173,19 @@ function isEnabled(value) { */ const isUserProvided = (value) => value === 'user_provided'; +/** + * Extracts the value of an environment variable from a 
string.
+ * @param {string} value - The value to be processed, possibly containing an env variable placeholder.
+ * @returns {string} - The actual value from the environment variable or the original value.
+ */
+function extractEnvVariable(value) {
+  const envVarMatch = value.match(/^\${(.+)}$/);
+  if (envVarMatch) {
+    return process.env[envVarMatch[1]] || value;
+  }
+  return value;
+}
+
 module.exports = {
   createOnProgress,
   isEnabled,
@@ -181,4 +194,5 @@ module.exports = {
   formatAction,
   addSpaceIfNeeded,
   isUserProvided,
+  extractEnvVariable,
 };
diff --git a/api/server/utils/handleText.spec.js b/api/server/utils/handleText.spec.js
index ea440a89a57..a5566fb1b2b 100644
--- a/api/server/utils/handleText.spec.js
+++ b/api/server/utils/handleText.spec.js
@@ -1,4 +1,4 @@
-const { isEnabled } = require('./handleText');
+const { isEnabled, extractEnvVariable } = require('./handleText');
 
 describe('isEnabled', () => {
   test('should return true when input is "true"', () => {
@@ -48,4 +48,51 @@ describe('isEnabled', () => {
   test('should return false when input is an array', () => {
     expect(isEnabled([])).toBe(false);
   });
+
+  describe('extractEnvVariable', () => {
+    const originalEnv = process.env;
+
+    beforeEach(() => {
+      jest.resetModules();
+      process.env = { ...originalEnv };
+    });
+
+    afterAll(() => {
+      process.env = originalEnv;
+    });
+
+    test('should return the value of the environment variable', () => {
+      process.env.TEST_VAR = 'test_value';
+      expect(extractEnvVariable('${TEST_VAR}')).toBe('test_value');
+    });
+
+    test('should return the original string if the environment variable is not defined correctly', () => {
+      process.env.TEST_VAR = 'test_value';
+      expect(extractEnvVariable('${ TEST_VAR }')).toBe('${ TEST_VAR }');
+    });
+
+    test('should return the original string if environment variable is not set', () => {
+      expect(extractEnvVariable('${NON_EXISTENT_VAR}')).toBe('${NON_EXISTENT_VAR}');
+    });
+
+    test('should return the original string if it does not contain an environment variable', () => {
+      expect(extractEnvVariable('some_string')).toBe('some_string');
+    });
+
+    test('should handle empty strings', () => {
+      expect(extractEnvVariable('')).toBe('');
+    });
+
+    test('should handle strings without variable format', () => {
+      expect(extractEnvVariable('no_var_here')).toBe('no_var_here');
+    });
+
+    test('should not process multiple variable formats', () => {
+      process.env.FIRST_VAR = 'first';
+      process.env.SECOND_VAR = 'second';
+      expect(extractEnvVariable('${FIRST_VAR} and ${SECOND_VAR}')).toBe(
+        '${FIRST_VAR} and ${SECOND_VAR}',
+      );
+    });
+  });
 });
From 5e97b71e660fa47dd5db07385b9d0589eb4adbf7 Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Tue, 2 Jan 2024 10:55:28 -0500
Subject: [PATCH 38/59] feat: custom config validation via zod schema, rename
 and move to `./projectRoot/librechat.yaml`

---
 .gitignore                                     |  3 +++
 api/server/services/Config/loadCustomConfig.js | 16 +++++++++++++---
 packages/data-provider/src/config.ts           |  8 +++++---
 3 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/.gitignore b/.gitignore
index f360cbba0ac..a09baeed0e0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -48,6 +48,9 @@ bower_components/
 .floo
 .flooignore
 
+#config file
+librechat.yaml
+
 # Environment
 .npmrc
 .env*
diff --git a/api/server/services/Config/loadCustomConfig.js b/api/server/services/Config/loadCustomConfig.js
index cd31c57e549..c17d3283b47 100644
--- a/api/server/services/Config/loadCustomConfig.js
+++ b/api/server/services/Config/loadCustomConfig.js
@@ -1,13 +1,15 @@
 const path = require('path');
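/*
 * A sketch of the validation this hunk introduces (assuming the `configSchema`
 * defined in `packages/data-provider/src/config.ts` above): `.strict()` makes
 * unknown top-level keys fail parsing, so a config such as
 *
 *   configSchema.strict().safeParse({
 *     version: '1.0.0',
 *     cache: true,
 *     endpoints: { custom: [] },
 *     foo: 1, // unrecognized key
 *   });
 *
 * yields `{ success: false, error: ZodError }`, which the code below logs and
 * maps to a `null` return (no custom endpoints are loaded).
 */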
-const { CacheKeys } = require('librechat-data-provider');
+const { CacheKeys, configSchema } = require('librechat-data-provider');
 const loadYaml = require('~/utils/loadYaml');
 const { getLogStores } = require('~/cache');
+const { logger } = require('~/config');
 
-const apiRoot = path.resolve(__dirname, '..', '..', '..');
-const configPath = path.resolve(apiRoot, 'data', 'custom-config.yaml');
+const projectRoot = path.resolve(__dirname, '..', '..', '..', '..');
+const configPath = path.resolve(projectRoot, 'librechat.yaml');
 
 /**
  * Load custom configuration files and caches the object if the `cache` field at root is true.
+ * Validation is performed by parsing the config file with the config schema.
  * @function loadCustomConfig
 * @returns {Promise<null | Object>} A promise that resolves to null or the custom config object.
  * */
@@ -18,6 +20,14 @@ async function loadCustomConfig() {
     return null;
   }
 
+  const result = configSchema.strict().safeParse(customConfig);
+  if (!result.success) {
+    logger.error(`Invalid custom config file at ${configPath}`, result.error);
+    return null;
+  } else {
+    logger.info('Loaded custom config file');
+  }
+
   if (customConfig.cache) {
     const cache = getLogStores(CacheKeys.CONFIG_STORE);
     await cache.set(CacheKeys.CUSTOM_CONFIG, customConfig);
diff --git a/packages/data-provider/src/config.ts b/packages/data-provider/src/config.ts
index ae5144edf39..3fed012aad0 100644
--- a/packages/data-provider/src/config.ts
+++ b/packages/data-provider/src/config.ts
@@ -21,9 +21,11 @@ export const endpointSchema = z.object({
 export const configSchema = z.object({
   version: z.string(),
   cache: z.boolean(),
-  endpoints: z.object({
-    custom: z.array(endpointSchema.partial()),
-  }),
+  endpoints: z
+    .object({
+      custom: z.array(endpointSchema.partial()),
+    })
+    .strict(),
 });
 
 export enum KnownEndpoints {
From dbc0e4b3121876e37b1a515279658636c9f861d8 Mon Sep 17 00:00:00 2001
From: Danny Avila
Date: Tue, 2 Jan 2024 10:55:59 -0500
Subject: [PATCH 39/59] docs: custom config docs and examples

---
 docs/install/configuration/custom_config.md | 149 ++++++++++++++++++++
 docs/install/configuration/index.md         |   1 +
 docs/install/index.md                       |   1 +
 librechat.example.yaml                      |  65 +++++++++
 4 files changed, 216 insertions(+)
 create mode 100644 docs/install/configuration/custom_config.md
 create mode 100644 librechat.example.yaml

diff --git a/docs/install/configuration/custom_config.md b/docs/install/configuration/custom_config.md
new file mode 100644
index 00000000000..f6a8ac10eeb
--- /dev/null
+++ b/docs/install/configuration/custom_config.md
@@ -0,0 +1,149 @@
+# LibreChat Configuration Guide
+
+This document provides detailed instructions for configuring the `librechat.yaml` file used by LibreChat.
+
+In future updates, some of the configurations from [your `.env` file](./dotenv.md) will migrate here.
+
+Further customization of the current configurations is also planned.
+
+## Configuration Overview
+
+
+The `librechat.yaml` file contains several key sections.
+
+**Note:** Fields not specifically mentioned as required are optional.
+
+### 1. Version
+- **Key**: `version`
+- **Type**: String
+- **Description**: Specifies the version of the configuration file.
+- **Example**: `version: 1.0.0`
+- **Required**
+
+### 2. Cache Settings
+- **Key**: `cache`
+- **Type**: Boolean
+- **Description**: Toggles caching on or off. Set to `true` to enable caching.
+- **Example**: `cache: true`
+
+### 3. Endpoints
+- **Key**: `endpoints`
+- **Type**: Object
+- **Description**: Defines custom API endpoints for the application.
+  - **Sub-Key**: `custom`
+    - **Type**: Array of Objects
+    - **Description**: Each object in the array represents a unique endpoint configuration.
+- **Required**
+
+#### Endpoint Object Structure
+Each endpoint in the `custom` array should have the following structure:
+
+- **name**: A unique name for the endpoint.
+  - Type: String
+  - Example: `name: "Mistral"`
+  - **Required**
+  - **Note**: Will be used as the "title" in the Endpoints Selector
+
+- **apiKey**: Your API key for the service. Can reference an environment variable, or allow the user to provide the value.
+  - Type: String (apiKey | `"user_provided"`)
+  - **Example**: `apiKey: "${MISTRAL_API_KEY}"` | `apiKey: "your_api_key"` | `apiKey: "user_provided"`
+  - **Required**
+
+- **baseURL**: Base URL for the API. Can reference an environment variable, or allow the user to provide the value.
+  - Type: String (baseURL | `"user_provided"`)
+  - **Example**: `baseURL: "https://api.mistral.ai/v1"` | `baseURL: "${MISTRAL_BASE_URL}"` | `baseURL: "user_provided"`
+  - **Required**
+
+- **iconURL**: The URL to use as the Endpoint Icon.
+  - Type: String
+  - Example: `iconURL: https://github.com/danny-avila/LibreChat/raw/main/docs/assets/LibreChat.svg`
+  - **Note**: The following are "known endpoints" (case-insensitive), which have icons provided for them. If your endpoint `name` matches these, you should omit this field:
+    - "Mistral"
+    - "OpenRouter"
+
+- **models**: Configuration for models.
+  - **Required**
+  - **default**: An array of strings indicating the default models to use. At least one value is required.
+    - Type: Array of Strings
+    - Example: `default: ["mistral-tiny", "mistral-small", "mistral-medium"]`
+    - **Note**: If fetching models fails, these defaults are used as a fallback.
+  - **fetch**: When set to `true`, attempts to fetch a list of models from the API.
+    - Type: Boolean
+    - Example: `fetch: true`
+    - **Note**: May cause slowdowns during initial use of the app if the response is delayed. Defaults to `false`.
+
+- **titleConvo**: Enables automatic conversation title generation when set to `true`.
+  - Type: Boolean
+  - Example: `titleConvo: true`
+
+- **titleMethod**: Chooses between "completion" or "functions" as the title-generation method.
+  - Type: String (`"completion"` | `"functions"`)
+  - Example: `titleMethod: "completion"`
+  - **Note**: Defaults to "completion" if omitted.
+
+- **titleModel**: Specifies the model to use for titles.
+  - Type: String
+  - Example: `titleModel: "mistral-tiny"`
+  - **Note**: Defaults to "gpt-3.5-turbo" if omitted. May cause issues if "gpt-3.5-turbo" is not available.
+
+- **summarize**: Enables summarization when set to `true`.
+  - Type: Boolean
+  - Example: `summarize: false`
+
+- **summaryModel**: Specifies the model to use if summarization is enabled.
+  - Type: String
+  - Example: `summaryModel: "mistral-tiny"`
+  - **Note**: Defaults to "gpt-3.5-turbo" if omitted. May cause issues if "gpt-3.5-turbo" is not available.
+
+- **forcePrompt**: If `true`, sends a `prompt` parameter instead of `messages`.
+  - Type: Boolean
+  - Example: `forcePrompt: false`
+  - **Note**: This combines all messages into a single text payload, following the OpenAI format.
+
+- **modelDisplayLabel**: The label displayed in messages for the current AI model.
+  - Type: String
+  - Example: `modelDisplayLabel: "Mistral"`
+  - **Note**: The display order is:
+    - 1. Custom name set via preset (if available)
+    - 2. Label derived from the model name (if applicable)
+    - 3. This value, if the above are not specified. Default is "AI" when not set.
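+
+Putting the required fields together, a minimal valid entry looks like this (the values shown are placeholders following the Mistral examples above):
+
+```yaml
+endpoints:
+  custom:
+    - name: "Mistral"
+      apiKey: "${MISTRAL_API_KEY}"
+      baseURL: "https://api.mistral.ai/v1"
+      models:
+        default: ["mistral-tiny"]
+```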
+
+## Additional Notes
+- Ensure that all URLs and keys are correctly specified to avoid connectivity issues.
+- Version compatibility should be checked to ensure smooth operation.
+
+## Example Config
+
+```yaml
+version: 1.0.0
+cache: true
+endpoints:
+  custom:
+    # Mistral AI API
+    - name: "Mistral"
+      apiKey: "your_api_key"
+      baseURL: "https://api.mistral.ai/v1"
+      models:
+        default: ["mistral-tiny", "mistral-small", "mistral-medium"]
+      titleConvo: true
+      titleModel: "mistral-tiny"
+      summarize: false
+      summaryModel: "mistral-tiny"
+      forcePrompt: false
+      modelDisplayLabel: "Mistral"
+
+    # OpenRouter.ai API
+    - name: "OpenRouter"
+      # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
+      apiKey: "${OPENROUTER_KEY}"
+      baseURL: "https://openrouter.ai/api/v1"
+      models:
+        default: ["gpt-3.5-turbo"]
+        fetch: true
+      titleConvo: true
+      titleModel: "gpt-3.5-turbo"
+      summarize: false
+      summaryModel: "gpt-3.5-turbo"
+      forcePrompt: false
+      modelDisplayLabel: "OpenRouter"
+```
diff --git a/docs/install/configuration/index.md b/docs/install/configuration/index.md
index e577a8b3a56..4fae69ea39b 100644
--- a/docs/install/configuration/index.md
+++ b/docs/install/configuration/index.md
@@ -7,6 +7,7 @@ weight: 2
 # Configuration
 
   * ⚙️ [Environment Variables](./dotenv.md)
+  * 🖥️ [Custom Configurations](./custom_config.md)
   * 🐋 [Docker Compose Override](./docker_override.md)
 ---
   * 🤖 [AI Setup](./ai_setup.md)
diff --git a/docs/install/index.md b/docs/install/index.md
index e6177b07b40..6a9f72084bd 100644
--- a/docs/install/index.md
+++ b/docs/install/index.md
@@ -17,6 +17,7 @@ weight: 1
 ## **[Configuration](./configuration/index.md)**
 
   * ⚙️ [Environment Variables](./configuration/dotenv.md)
+  * 🖥️ [Custom Configurations](./configuration/custom_config.md)
   * 🐋 [Docker Compose Override](./configuration/docker_override.md)
   * 🤖 [AI Setup](./configuration/ai_setup.md)
   * 🚅 [LiteLLM](./configuration/litellm.md)
diff --git a/librechat.example.yaml b/librechat.example.yaml
new file mode 100644
index 00000000000..214e16e8af6
--- /dev/null
+++ b/librechat.example.yaml
@@ -0,0 +1,65 @@
+# Configuration version (required)
+version: 1.0.0
+
+# Cache settings: Set to true to enable caching
+cache: true
+
+# Definition of custom endpoints
+endpoints:
+  custom:
+    # Mistral AI API
+    - name: "Mistral" # Unique name for the endpoint
+      # For `apiKey` and `baseURL`, you can use environment variables that you define.
+      # recommended environment variables:
+      apiKey: "${MISTRAL_API_KEY}"
+      baseURL: "https://api.mistral.ai/v1"
+
+      # Models configuration
+      models:
+        # List of default models to use. At least one value is required.
+        default: ["mistral-tiny", "mistral-small", "mistral-medium"]
+        # Fetch option: Set to true to fetch models from API.
+        fetch: true # Defaults to false.
+
+      # Optional configurations
+
+      # Title Conversation setting
+      titleConvo: true # Set to true to enable title conversation
+
+      # Title Method: Choose between "completion" or "functions".
+      titleMethod: "completion" # Defaults to "completion" if omitted.
+
+      # Title Model: Specify the model to use for titles.
+      titleModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
+
+      # Summarize setting: Set to true to enable summarization.
+      summarize: false
+
+      # Summary Model: Specify the model to use if summarization is enabled.
+      summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
+ + # Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`. + forcePrompt: false + + # modelDisplayLabel: The label displayed for the AI model. + modelDisplayLabel: "Mistral" # Default is "AI" when not set. + + # OpenRouter.ai Example + - name: "OpenRouter" + # For `apiKey` and `baseURL`, you can use environment variables that you define. + # recommended environment variables: + # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well. + apiKey: "${OPENROUTER_KEY}" + baseURL: "https://openrouter.ai/api/v1" + models: + default: ["gpt-3.5-turbo"] + fetch: true + titleConvo: true + titleModel: "gpt-3.5-turbo" + summarize: false + summaryModel: "gpt-3.5-turbo" + forcePrompt: false + modelDisplayLabel: "OpenRouter" + +# See the Custom Configuration Guide for more information: +# From 3ed91f6ee296856d5b411fb253d39f6d2139dc75 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Tue, 2 Jan 2024 11:26:18 -0500 Subject: [PATCH 40/59] fix(OpenAIClient/mistral): mistral does not allow singular system message, also add `useChatCompletion` flag to use openai-node for title completions --- api/app/clients/OpenAIClient.js | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/api/app/clients/OpenAIClient.js b/api/app/clients/OpenAIClient.js index a6ac2e316ef..9c63f188154 100644 --- a/api/app/clients/OpenAIClient.js +++ b/api/app/clients/OpenAIClient.js @@ -449,7 +449,7 @@ class OpenAIClient extends BaseClient { }, opts.abortController || new AbortController(), ); - } else if (typeof opts.onProgress === 'function') { + } else if (typeof opts.onProgress === 'function' || this.options.useChatCompletion) { reply = await this.chatCompletion({ payload, clientOptions: opts, @@ -599,9 +599,8 @@ ${convo} ]; try { - const onProgress = () => ({}); title = ( - await this.sendPayload(instructionsPayload, { modelOptions, onProgress }) + await this.sendPayload(instructionsPayload, { modelOptions, useChatCompletion: true }) ).replaceAll('"', ''); } catch (e) { logger.error( @@ -832,6 +831,14 @@ ${convo} ...opts, }); + /* hacky fix for Mistral AI API not allowing a singular system message in payload */ + if (this.completionsUrl.includes('https://api.mistral.ai/v1') && modelOptions.messages) { + const { messages } = modelOptions; + if (messages.length === 1 && messages[0].role === 'system') { + modelOptions.messages[0].role = 'user'; + } + } + let UnexpectedRoleError = false; if (modelOptions.stream) { const stream = await openai.beta.chat.completions From 8910e4f1327aafb69bbc4dc517478fb12f27b169 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Tue, 2 Jan 2024 11:26:56 -0500 Subject: [PATCH 41/59] fix(custom/initializeClient): extract env var and use `isUserProvided` function --- .../services/Endpoints/custom/initializeClient.js | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/api/server/services/Endpoints/custom/initializeClient.js b/api/server/services/Endpoints/custom/initializeClient.js index 9fcb2915225..08f2534606b 100644 --- a/api/server/services/Endpoints/custom/initializeClient.js +++ b/api/server/services/Endpoints/custom/initializeClient.js @@ -1,5 +1,6 @@ const { EModelEndpoint } = require('librechat-data-provider'); const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService'); +const { isUserProvided, extractEnvVariable } = require('~/server/utils'); const getCustomConfig = require('~/cache/getCustomConfig'); const { OpenAIClient } = 
require('~/app'); @@ -16,8 +17,8 @@ const initializeClient = async ({ req, res, endpointOption }) => { const customEndpoints = endpoints[EModelEndpoint.custom] ?? []; const endpointConfig = customEndpoints.find((endpointConfig) => endpointConfig.name === endpoint); - const CUSTOM_API_KEY = endpointConfig.apiKey; - const CUSTOM_BASE_URL = endpointConfig.baseURL; + const CUSTOM_API_KEY = extractEnvVariable(endpointConfig.apiKey); + const CUSTOM_BASE_URL = extractEnvVariable(endpointConfig.baseURL); const customOptions = { titleConvo: endpointConfig.titleConvo, @@ -42,15 +43,15 @@ const initializeClient = async ({ req, res, endpointOption }) => { [endpoint]: CUSTOM_API_KEY, }; - const isUserProvided = credentials[endpoint] === 'user_provided'; + const useUserKey = isUserProvided(credentials[endpoint]); let userKey = null; - if (expiresAt && isUserProvided) { + if (expiresAt && useUserKey) { checkUserKeyExpiry(expiresAt, 'Your API key has expired. Please provide it again.'); userKey = await getUserKey({ userId: req.user.id, name: endpoint }); } - let apiKey = isUserProvided ? userKey : credentials[endpoint]; + let apiKey = useUserKey ? userKey : credentials[endpoint]; if (!apiKey) { throw new Error('API key not provided.'); From 9dbf63b21266a0babb4925695a9f2e5cbb0c382d Mon Sep 17 00:00:00 2001 From: Danny Avila <110412045+danny-avila@users.noreply.github.com> Date: Tue, 2 Jan 2024 11:44:32 -0500 Subject: [PATCH 42/59] Update librechat.example.yaml --- librechat.example.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/librechat.example.yaml b/librechat.example.yaml index 214e16e8af6..ad2d349e422 100644 --- a/librechat.example.yaml +++ b/librechat.example.yaml @@ -62,4 +62,4 @@ endpoints: modelDisplayLabel: "OpenRouter" # See the Custom Configuration Guide for more information: -# +# https://docs.librechat.ai/install/configuration/custom_config.html From abe25b6851bce42923c05d44f85e769e2ef487ac Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Tue, 2 Jan 2024 13:14:34 -0500 Subject: [PATCH 43/59] feat(InputWithLabel): add className props, and forwardRef --- .../Input/SetKeyDialog/InputWithLabel.tsx | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/client/src/components/Input/SetKeyDialog/InputWithLabel.tsx b/client/src/components/Input/SetKeyDialog/InputWithLabel.tsx index 270e954ea75..2266a7e460a 100644 --- a/client/src/components/Input/SetKeyDialog/InputWithLabel.tsx +++ b/client/src/components/Input/SetKeyDialog/InputWithLabel.tsx @@ -1,21 +1,26 @@ -import React, { ChangeEvent, FC } from 'react'; +import { forwardRef } from 'react'; +import type { ChangeEvent, FC, Ref } from 'react'; import { cn, defaultTextPropsLabel, removeFocusOutlines } from '~/utils/'; import { Input, Label } from '~/components/ui'; import { useLocalize } from '~/hooks'; interface InputWithLabelProps { + id: string; value: string; - onChange: (event: ChangeEvent) => void; label: string; subLabel?: string; - id: string; + onChange: (event: ChangeEvent) => void; + labelClassName?: string; + inputClassName?: string; + ref?: Ref; } -const InputWithLabel: FC = ({ value, onChange, label, subLabel, id }) => { +const InputWithLabel: FC = forwardRef((props, ref) => { + const { id, value, label, subLabel, onChange, labelClassName = '', inputClassName = '' } = props; const localize = useLocalize(); return ( <> -
+
@@ -24,21 +29,22 @@ const InputWithLabel: FC = ({ value, onChange, label, subLa )}
- ); -}; +}); export default InputWithLabel; From e3b7b065cb5344b79312ecc8b7afc8722a03eb5b Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Tue, 2 Jan 2024 14:35:40 -0500 Subject: [PATCH 44/59] fix(streamResponse): handle error edge case where either messages or convos query throws an error --- api/server/utils/streamResponse.js | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/api/server/utils/streamResponse.js b/api/server/utils/streamResponse.js index 1933839fac2..3511f144cc7 100644 --- a/api/server/utils/streamResponse.js +++ b/api/server/utils/streamResponse.js @@ -1,6 +1,8 @@ const crypto = require('crypto'); +const { parseConvo } = require('librechat-data-provider'); const { saveMessage, getMessages } = require('~/models/Message'); const { getConvo } = require('~/models/Conversation'); +const { logger } = require('~/config'); /** * Sends error data in Server Sent Events format and ends the response. @@ -65,12 +67,21 @@ const sendError = async (res, options, callback) => { if (!errorMessage.error) { const requestMessage = { messageId: parentMessageId, conversationId }; - const query = await getMessages(requestMessage); + let query = [], + convo = {}; + try { + query = await getMessages(requestMessage); + convo = await getConvo(user, conversationId); + } catch (err) { + logger.error('[sendError] Error retrieving conversation data:', err); + convo = parseConvo(errorMessage); + } + return sendMessage(res, { final: true, requestMessage: query?.[0] ? query[0] : requestMessage, responseMessage: errorMessage, - conversation: await getConvo(user, conversationId), + conversation: convo, }); } From 204cc1ed8a4471241a78afd504dbaeb994a55040 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Tue, 2 Jan 2024 14:39:41 -0500 Subject: [PATCH 45/59] fix(useSSE): handle errorHandler edge cases where error response is and is not properly formatted from API, especially when a conversationId is not yet provided, which ensures stream is properly closed on error --- client/src/hooks/useSSE.ts | 41 ++++++++++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/client/src/hooks/useSSE.ts b/client/src/hooks/useSSE.ts index 26267296cc3..64b20914846 100644 --- a/client/src/hooks/useSSE.ts +++ b/client/src/hooks/useSSE.ts @@ -1,3 +1,4 @@ +import { v4 } from 'uuid'; import { useEffect } from 'react'; import { useParams } from 'react-router-dom'; import { @@ -207,10 +208,37 @@ export default function useSSE(submission: TSubmission | null, index = 0) { setIsSubmitting(false); }; - const errorHandler = (data: TResData, submission: TSubmission) => { + const errorHandler = ({ data, submission }: { data?: TResData; submission: TSubmission }) => { const { messages, message } = submission; - if (!data.conversationId) { + const conversationId = message?.conversationId ?? submission?.conversationId; + const parseErrorResponse = (data: TResData | Partial) => { + const metadata = data['responseMessage'] ?? data; + return tMessageSchema.parse({ + ...metadata, + error: true, + parentMessageId: message?.messageId, + }); + }; + + if (!data) { + const convoId = conversationId ?? 
v4(); + const errorResponse = parseErrorResponse({ + text: 'Error connecting to server', + ...submission, + conversationId: convoId, + }); + setMessages([...messages, message, errorResponse]); + newConversation({ template: { conversationId: convoId } }); + setIsSubmitting(false); + return; + } + + if (!conversationId && !data.conversationId) { + const convoId = v4(); + const errorResponse = parseErrorResponse(data); + setMessages([...messages, message, errorResponse]); + newConversation({ template: { conversationId: convoId } }); setIsSubmitting(false); return; } @@ -318,19 +346,20 @@ export default function useSSE(submission: TSubmission | null, index = 0) { abortConversation(message?.conversationId ?? submission?.conversationId, submission); events.onerror = function (e: MessageEvent) { - console.log('error in opening conn.'); + console.log('error in server stream.'); startupConfig?.checkBalance && balanceQuery.refetch(); events.close(); - let data = {} as TResData; + let data: TResData | undefined = undefined; try { - data = JSON.parse(e.data); + data = JSON.parse(e.data) as TResData; } catch (error) { console.error(error); console.log(e); } - errorHandler(data, { ...submission, message }); + errorHandler({ data, submission: { ...submission, message } }); + events.oncancel(); }; setIsSubmitting(true); From 00baf213e7b8cf52d077c2f90e423c252915d4d0 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Tue, 2 Jan 2024 15:08:41 -0500 Subject: [PATCH 46/59] feat: user_provided keys for custom endpoints --- .../Endpoints/custom/initializeClient.js | 48 +++++++------ client/src/common/types.ts | 2 +- .../Chat/Menus/Endpoints/MenuItem.tsx | 11 ++- .../Input/SetKeyDialog/CustomConfig.tsx | 46 ------------- .../Input/SetKeyDialog/CustomEndpoint.tsx | 46 +++++++++++++ .../Input/SetKeyDialog/SetKeyDialog.tsx | 67 +++++++++++++++---- 6 files changed, 138 insertions(+), 82 deletions(-) delete mode 100644 client/src/components/Input/SetKeyDialog/CustomConfig.tsx create mode 100644 client/src/components/Input/SetKeyDialog/CustomEndpoint.tsx diff --git a/api/server/services/Endpoints/custom/initializeClient.js b/api/server/services/Endpoints/custom/initializeClient.js index 08f2534606b..273a7b19a05 100644 --- a/api/server/services/Endpoints/custom/initializeClient.js +++ b/api/server/services/Endpoints/custom/initializeClient.js @@ -30,8 +30,36 @@ const initializeClient = async ({ req, res, endpointOption }) => { contextStrategy: endpointConfig.summarize ? 'summarize' : null, }; + const useUserKey = isUserProvided(CUSTOM_API_KEY); + const useUserURL = isUserProvided(CUSTOM_BASE_URL); + + let userValues = null; + if (expiresAt && (useUserKey || useUserURL)) { + checkUserKeyExpiry( + expiresAt, + `Your API values for ${endpoint} have expired. Please configure them again.`, + ); + userValues = await getUserKey({ userId: req.user.id, name: endpoint }); + try { + userValues = JSON.parse(userValues); + } catch (e) { + throw new Error(`Invalid JSON provided for ${endpoint} user values.`); + } + } + + let apiKey = useUserKey ? userValues.apiKey : CUSTOM_API_KEY; + let baseURL = useUserURL ? userValues.baseURL : CUSTOM_BASE_URL; + + if (!apiKey) { + throw new Error(`${endpoint} API key not provided.`); + } + + if (!baseURL) { + throw new Error(`${endpoint} Base URL not provided.`); + } + const clientOptions = { - reverseProxyUrl: CUSTOM_BASE_URL ?? null, + reverseProxyUrl: baseURL ?? null, proxy: PROXY ?? 
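    /*
     * A sketch of the key flow this commit enables: when both values are
     * user-provided, the client dialog stores them as a single JSON string, e.g.
     *   '{"apiKey": "sk-...", "baseURL": "https://example-host/v1"}'
     * (an illustrative value, not a real credential), which the
     * JSON.parse(userValues) call above unpacks before apiKey/baseURL are resolved.
     */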
null, req, res, @@ -39,24 +67,6 @@ const initializeClient = async ({ req, res, endpointOption }) => { ...endpointOption, }; - const credentials = { - [endpoint]: CUSTOM_API_KEY, - }; - - const useUserKey = isUserProvided(credentials[endpoint]); - - let userKey = null; - if (expiresAt && useUserKey) { - checkUserKeyExpiry(expiresAt, 'Your API key has expired. Please provide it again.'); - userKey = await getUserKey({ userId: req.user.id, name: endpoint }); - } - - let apiKey = useUserKey ? userKey : credentials[endpoint]; - - if (!apiKey) { - throw new Error('API key not provided.'); - } - const client = new OpenAIClient(apiKey, clientOptions); return { client, diff --git a/client/src/common/types.ts b/client/src/common/types.ts index cf15e999944..84654297ff5 100644 --- a/client/src/common/types.ts +++ b/client/src/common/types.ts @@ -148,7 +148,7 @@ export type TDisplayProps = TText & export type TConfigProps = { userKey: string; setUserKey: React.Dispatch>; - endpoint: string; + endpoint: EModelEndpoint | string; }; export type TDangerButtonProps = { diff --git a/client/src/components/Chat/Menus/Endpoints/MenuItem.tsx b/client/src/components/Chat/Menus/Endpoints/MenuItem.tsx index f79e6e20523..fd516bbab3b 100644 --- a/client/src/components/Chat/Menus/Endpoints/MenuItem.tsx +++ b/client/src/components/Chat/Menus/Endpoints/MenuItem.tsx @@ -46,7 +46,8 @@ const MenuItem: FC = ({ } }; - const iconKey = endpointsConfig?.[endpoint ?? '']?.type ? 'unknown' : endpoint ?? 'unknown'; + const endpointType = endpointsConfig?.[endpoint ?? '']?.type; + const iconKey = endpointType ? 'unknown' : endpoint ?? 'unknown'; const Icon = icons[iconKey]; return ( @@ -141,7 +142,13 @@ const MenuItem: FC = ({
{userProvidesKey && ( - + )} ); diff --git a/client/src/components/Input/SetKeyDialog/CustomConfig.tsx b/client/src/components/Input/SetKeyDialog/CustomConfig.tsx deleted file mode 100644 index 4781fab8cc1..00000000000 --- a/client/src/components/Input/SetKeyDialog/CustomConfig.tsx +++ /dev/null @@ -1,46 +0,0 @@ -// import * as Checkbox from '@radix-ui/react-checkbox'; -// import { CheckIcon } from '@radix-ui/react-icons'; -import { useFormContext, Controller } from 'react-hook-form'; -import InputWithLabel from './InputWithLabel'; - -const CustomConfig = () => { - const { control } = useFormContext(); - - return ( - - ( - - )} - /> - - ( - - )} - /> - - ( - - )} - /> - - ( - - )} - /> - - ); -}; - -export default CustomConfig; diff --git a/client/src/components/Input/SetKeyDialog/CustomEndpoint.tsx b/client/src/components/Input/SetKeyDialog/CustomEndpoint.tsx new file mode 100644 index 00000000000..fd5aa5d7431 --- /dev/null +++ b/client/src/components/Input/SetKeyDialog/CustomEndpoint.tsx @@ -0,0 +1,46 @@ +import { EModelEndpoint } from 'librechat-data-provider'; +import { useFormContext, Controller } from 'react-hook-form'; +import InputWithLabel from './InputWithLabel'; + +const CustomEndpoint = ({ + endpoint, + userProvideURL, +}: { + endpoint: EModelEndpoint | string; + userProvideURL?: boolean | null; +}) => { + const { control } = useFormContext(); + return ( +
+ ( + + )} + /> + {userProvideURL && ( + ( + + )} + /> + )} + + ); +}; + +export default CustomEndpoint; diff --git a/client/src/components/Input/SetKeyDialog/SetKeyDialog.tsx b/client/src/components/Input/SetKeyDialog/SetKeyDialog.tsx index 64087a78915..ab259fc58c2 100644 --- a/client/src/components/Input/SetKeyDialog/SetKeyDialog.tsx +++ b/client/src/components/Input/SetKeyDialog/SetKeyDialog.tsx @@ -6,9 +6,10 @@ import DialogTemplate from '~/components/ui/DialogTemplate'; import { RevokeKeysButton } from '~/components/Nav'; import { Dialog, Dropdown } from '~/components/ui'; import { useUserKey, useLocalize } from '~/hooks'; +import { useToastContext } from '~/Providers'; +import CustomConfig from './CustomEndpoint'; import GoogleConfig from './GoogleConfig'; import OpenAIConfig from './OpenAIConfig'; -import CustomConfig from './CustomConfig'; import OtherConfig from './OtherConfig'; import HelpText from './HelpText'; @@ -34,21 +35,28 @@ const SetKeyDialog = ({ open, onOpenChange, endpoint, + endpointType, + userProvideURL, }: Pick & { - endpoint: string; + endpoint: EModelEndpoint | string; + endpointType?: EModelEndpoint; + userProvideURL?: boolean | null; }) => { const methods = useForm({ defaultValues: { - customEndpointName: '', - customBaseURL: '', - customModels: '', - customApiKey: '', + apiKey: '', + baseURL: '', + // TODO: allow endpoint definitions from user + // name: '', + // TODO: add custom endpoint models defined by user + // models: '', }, }); const [userKey, setUserKey] = useState(''); const [expiresAtLabel, setExpiresAtLabel] = useState(EXPIRY.TWELVE_HOURS.display); const { getExpiry, saveUserKey } = useUserKey(endpoint); + const { showToast } = useToastContext(); const localize = useLocalize(); const expirationOptions = Object.values(EXPIRY); @@ -58,18 +66,44 @@ const SetKeyDialog = ({ }; const submit = () => { - if (endpoint === EModelEndpoint.custom) { - methods.handleSubmit((data) => console.log(data))(); - return; - } const selectedOption = expirationOptions.find((option) => option.display === expiresAtLabel); const expiresAt = Date.now() + (selectedOption ? selectedOption.value : 0); - saveUserKey(userKey, expiresAt); - onOpenChange(false); + + const saveKey = (key: string) => { + saveUserKey(key, expiresAt); + onOpenChange(false); + }; + + if (endpoint === EModelEndpoint.custom || endpointType === EModelEndpoint.custom) { + // TODO: handle other user provided options besides baseURL and apiKey + methods.handleSubmit((data) => { + const emptyValues = Object.keys(data).filter((key) => { + if (key === 'baseURL' && !userProvideURL) { + return false; + } + return data[key] === ''; + }); + + if (emptyValues.length > 0) { + showToast({ + message: 'The following fields are required: ' + emptyValues.join(', '), + status: 'error', + }); + onOpenChange(true); + } else { + saveKey(JSON.stringify(data)); + methods.reset(); + } + })(); + return; + } + + saveKey(userKey); setUserKey(''); }; - const EndpointComponent = endpointComponents[endpoint] ?? endpointComponents['default']; + const EndpointComponent = + endpointComponents[endpointType ?? endpoint] ?? endpointComponents['default']; const expiryTime = getExpiry(); return ( @@ -94,7 +128,12 @@ const SetKeyDialog = ({ width={185} /> - +
From 3d909cd8168b60d0f72e6a5576311c5fadae58ef Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Tue, 2 Jan 2024 15:24:23 -0500 Subject: [PATCH 47/59] fix(config/endpointSchema): do not allow default endpoint values in custom endpoint `name` --- packages/data-provider/src/config.ts | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/packages/data-provider/src/config.ts b/packages/data-provider/src/config.ts index 3fed012aad0..7b31508cc5b 100644 --- a/packages/data-provider/src/config.ts +++ b/packages/data-provider/src/config.ts @@ -1,8 +1,12 @@ import { z } from 'zod'; -import { EModelEndpoint } from './schemas'; +import { EModelEndpoint, eModelEndpointSchema } from './schemas'; export const endpointSchema = z.object({ - name: z.string(), + name: z.string().refine((value) => !eModelEndpointSchema.safeParse(value).success, { + message: `Value cannot be one of the default endpoint (EModelEndpoint) values: ${Object.values( + EModelEndpoint, + ).join(', ')}`, + }), apiKey: z.string(), baseURL: z.string(), models: z.object({ From a301b2e551603ca997ec5902b71eefa5f8845e0e Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Tue, 2 Jan 2024 15:57:11 -0500 Subject: [PATCH 48/59] feat(loadConfigModels): extract env variables and optimize fetching models --- .../services/Config/loadConfigModels.js | 79 ++++++++++++------- 1 file changed, 49 insertions(+), 30 deletions(-) diff --git a/api/server/services/Config/loadConfigModels.js b/api/server/services/Config/loadConfigModels.js index e013c5605bd..0abe15a8a1f 100644 --- a/api/server/services/Config/loadConfigModels.js +++ b/api/server/services/Config/loadConfigModels.js @@ -1,7 +1,7 @@ const { CacheKeys, EModelEndpoint } = require('librechat-data-provider'); +const { isUserProvided, extractEnvVariable } = require('~/server/utils'); const { fetchModels } = require('~/server/services/ModelService'); const loadCustomConfig = require('./loadCustomConfig'); -const { isUserProvided } = require('~/server/utils'); const { getLogStores } = require('~/cache'); /** @@ -22,35 +22,54 @@ async function loadConfigModels() { const { endpoints = {} } = customConfig ?? 
{}; const modelsConfig = {}; - if (Array.isArray(endpoints[EModelEndpoint.custom])) { - const customEndpoints = endpoints[EModelEndpoint.custom].filter( - (endpoint) => - endpoint.baseURL && - endpoint.apiKey && - endpoint.name && - endpoint.models && - (endpoint.models.fetch || endpoint.models.default), - ); - - for (let i = 0; i < customEndpoints.length; i++) { - const endpoint = customEndpoints[i]; - const { models, name, baseURL, apiKey } = endpoint; - - modelsConfig[name] = []; - - // TODO: allow fetching with user provided api key and base url - const shouldFetch = models.fetch && !isUserProvided(apiKey) && !isUserProvided(baseURL); - if (shouldFetch) { - modelsConfig[name] = await fetchModels({ - baseURL, - apiKey, - }); - continue; - } - - if (Array.isArray(models.default)) { - modelsConfig[name] = models.default; - } + if (!Array.isArray(endpoints[EModelEndpoint.custom])) { + return modelsConfig; + } + + const customEndpoints = endpoints[EModelEndpoint.custom].filter( + (endpoint) => + endpoint.baseURL && + endpoint.apiKey && + endpoint.name && + endpoint.models && + (endpoint.models.fetch || endpoint.models.default), + ); + + const fetchPromisesMap = {}; // Map for promises keyed by baseURL + const baseUrlToNameMap = {}; // Map to associate baseURLs with names + + for (let i = 0; i < customEndpoints.length; i++) { + const endpoint = customEndpoints[i]; + const { models, name, baseURL, apiKey } = endpoint; + + const API_KEY = extractEnvVariable(apiKey); + const BASE_URL = extractEnvVariable(baseURL); + + modelsConfig[name] = []; + + if (models.fetch && !isUserProvided(API_KEY) && !isUserProvided(BASE_URL)) { + fetchPromisesMap[BASE_URL] = + fetchPromisesMap[BASE_URL] || fetchModels({ baseURL: BASE_URL, apiKey: API_KEY }); + baseUrlToNameMap[BASE_URL] = baseUrlToNameMap[BASE_URL] || []; + baseUrlToNameMap[BASE_URL].push(name); + continue; + } + + if (Array.isArray(models.default)) { + modelsConfig[name] = models.default; + } + } + + const fetchedData = await Promise.all(Object.values(fetchPromisesMap)); + const baseUrls = Object.keys(fetchPromisesMap); + + for (let i = 0; i < fetchedData.length; i++) { + const currentBaseUrl = baseUrls[i]; + const modelData = fetchedData[i]; + const associatedNames = baseUrlToNameMap[currentBaseUrl]; + + for (const name of associatedNames) { + modelsConfig[name] = modelData; } } From e0509f34d666ae12b173c4e7a5f90d9c7c971046 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Tue, 2 Jan 2024 17:17:21 -0500 Subject: [PATCH 49/59] feat: support custom endpoint iconURL for messages and Nav --- client/src/common/types.ts | 1 + client/src/components/Conversations/Convo.tsx | 9 +++++++-- client/src/components/Endpoints/Icon.tsx | 12 +++++++++--- client/src/components/Endpoints/MinimalIcon.tsx | 9 ++++++++- client/src/hooks/Messages/useMessageHelpers.ts | 5 ++++- 5 files changed, 29 insertions(+), 7 deletions(-) diff --git a/client/src/common/types.ts b/client/src/common/types.ts index 84654297ff5..17625731ff0 100644 --- a/client/src/common/types.ts +++ b/client/src/common/types.ts @@ -201,6 +201,7 @@ export type IconProps = Pick & Pick & { size?: number; button?: boolean; + iconURL?: string; message?: boolean; className?: string; endpoint?: EModelEndpoint | string | null; diff --git a/client/src/components/Conversations/Convo.tsx b/client/src/components/Conversations/Convo.tsx index 67bb347d0d1..aa7d2cfd659 100644 --- a/client/src/components/Conversations/Convo.tsx +++ b/client/src/components/Conversations/Convo.tsx @@ -1,7 +1,10 @@ import { useRecoilValue } 
from 'recoil'; import { useState, useRef } from 'react'; import { useParams } from 'react-router-dom'; -import { useUpdateConversationMutation } from 'librechat-data-provider/react-query'; +import { + useGetEndpointsQuery, + useUpdateConversationMutation, +} from 'librechat-data-provider/react-query'; import type { MouseEvent, FocusEvent, KeyboardEvent } from 'react'; import { useConversations, useNavigateToConvo } from '~/hooks'; import { MinimalIcon } from '~/components/Endpoints'; @@ -15,8 +18,9 @@ type KeyEvent = KeyboardEvent; export default function Conversation({ conversation, retainView, toggleNav, i }) { const { conversationId: currentConvoId } = useParams(); - const activeConvos = useRecoilValue(store.allConversationsSelector); const updateConvoMutation = useUpdateConversationMutation(currentConvoId ?? ''); + const activeConvos = useRecoilValue(store.allConversationsSelector); + const { data: endpointsConfig } = useGetEndpointsQuery(); const { refreshConversations } = useConversations(); const { navigateToConvo } = useNavigateToConvo(); const { showToast } = useToastContext(); @@ -86,6 +90,7 @@ export default function Conversation({ conversation, retainView, toggleNav, i }) const icon = MinimalIcon({ size: 20, + iconURL: endpointsConfig?.[conversation.endpoint ?? '']?.iconURL, endpoint: conversation.endpoint, endpointType: conversation.endpointType, model: conversation.model, diff --git a/client/src/components/Endpoints/Icon.tsx b/client/src/components/Endpoints/Icon.tsx index f8ef0dbffcb..5d9d3fff7c1 100644 --- a/client/src/components/Endpoints/Icon.tsx +++ b/client/src/components/Endpoints/Icon.tsx @@ -15,9 +15,8 @@ import { IconProps } from '~/common'; import { cn } from '~/utils'; const Icon: React.FC = (props) => { - const { size = 30, isCreatedByUser, button, model = '', endpoint, error, jailbreak } = props; - const { user } = useAuthContext(); + const { size = 30, isCreatedByUser, button, model = '', endpoint, error, jailbreak } = props; if (isCreatedByUser) { const username = user?.name || 'User'; @@ -102,7 +101,14 @@ const Icon: React.FC = (props) => { }, null: { icon: , bg: 'grey', name: 'N/A' }, default: { - icon: , + icon: ( + + ), name: endpoint, }, }; diff --git a/client/src/components/Endpoints/MinimalIcon.tsx b/client/src/components/Endpoints/MinimalIcon.tsx index 2bf05c9ffa5..1499dec7ce1 100644 --- a/client/src/components/Endpoints/MinimalIcon.tsx +++ b/client/src/components/Endpoints/MinimalIcon.tsx @@ -41,7 +41,14 @@ const MinimalIcon: React.FC = (props) => { [EModelEndpoint.bingAI]: { icon: , name: 'BingAI' }, [EModelEndpoint.chatGPTBrowser]: { icon: , name: 'ChatGPT' }, default: { - icon: , + icon: ( + + ), name: endpoint, }, }; diff --git a/client/src/hooks/Messages/useMessageHelpers.ts b/client/src/hooks/Messages/useMessageHelpers.ts index a6c03462509..285d1a88dee 100644 --- a/client/src/hooks/Messages/useMessageHelpers.ts +++ b/client/src/hooks/Messages/useMessageHelpers.ts @@ -1,5 +1,6 @@ -import { useEffect, useRef } from 'react'; import copy from 'copy-to-clipboard'; +import { useEffect, useRef } from 'react'; +import { useGetEndpointsQuery } from 'librechat-data-provider/react-query'; import type { TMessage } from 'librechat-data-provider'; import type { TMessageProps } from '~/common'; import Icon from '~/components/Endpoints/Icon'; @@ -7,6 +8,7 @@ import { useChatContext } from '~/Providers'; export default function useMessageHelpers(props: TMessageProps) { const latestText = useRef(''); + const { data: endpointsConfig } = useGetEndpointsQuery(); 
const { message, currentEditId, setCurrentEditId } = props; const { @@ -51,6 +53,7 @@ export default function useMessageHelpers(props: TMessageProps) { const icon = Icon({ ...conversation, ...(message as TMessage), + iconURL: endpointsConfig?.[conversation?.endpoint ?? '']?.iconURL, model: message?.model ?? conversation?.model, size: 28.8, }); From f8a8b42df5dc6efbc3677bccbfa31237978e820b Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Tue, 2 Jan 2024 18:57:52 -0500 Subject: [PATCH 50/59] feat(OpenAIClient): add/dropParams support --- api/app/clients/OpenAIClient.js | 19 +++++++++++++++++-- .../Endpoints/custom/initializeClient.js | 2 ++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/api/app/clients/OpenAIClient.js b/api/app/clients/OpenAIClient.js index 9c63f188154..1c22d6f7d41 100644 --- a/api/app/clients/OpenAIClient.js +++ b/api/app/clients/OpenAIClient.js @@ -771,7 +771,9 @@ ${convo} if (!abortController) { abortController = new AbortController(); } - const modelOptions = { ...this.modelOptions }; + + let modelOptions = { ...this.modelOptions }; + if (typeof onProgress === 'function') { modelOptions.stream = true; } @@ -832,13 +834,26 @@ ${convo} }); /* hacky fix for Mistral AI API not allowing a singular system message in payload */ - if (this.completionsUrl.includes('https://api.mistral.ai/v1') && modelOptions.messages) { + if (opts.baseURL.includes('https://api.mistral.ai/v1') && modelOptions.messages) { const { messages } = modelOptions; if (messages.length === 1 && messages[0].role === 'system') { modelOptions.messages[0].role = 'user'; } } + if (this.options.addParams && typeof this.options.addParams === 'object') { + modelOptions = { + ...modelOptions, + ...this.options.addParams, + }; + } + + if (this.options.dropParams && Array.isArray(this.options.dropParams)) { + this.options.dropParams.forEach((param) => { + delete modelOptions[param]; + }); + } + let UnexpectedRoleError = false; if (modelOptions.stream) { const stream = await openai.beta.chat.completions diff --git a/api/server/services/Endpoints/custom/initializeClient.js b/api/server/services/Endpoints/custom/initializeClient.js index 273a7b19a05..93182fa89e2 100644 --- a/api/server/services/Endpoints/custom/initializeClient.js +++ b/api/server/services/Endpoints/custom/initializeClient.js @@ -21,6 +21,8 @@ const initializeClient = async ({ req, res, endpointOption }) => { const CUSTOM_BASE_URL = extractEnvVariable(endpointConfig.baseURL); const customOptions = { + addParams: endpointConfig.addParams, + dropParams: endpointConfig.dropParams, titleConvo: endpointConfig.titleConvo, titleModel: endpointConfig.titleModel, forcePrompt: endpointConfig.forcePrompt, From 84c786b16a8e0784b0c9e979c45f48f604876e7b Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Tue, 2 Jan 2024 19:01:34 -0500 Subject: [PATCH 51/59] docs: update docs with default params, add/dropParams, and notes to use config file instead of `OPENAI_REVERSE_PROXY` --- docs/install/configuration/custom_config.md | 63 ++++++++++++++++++++- docs/install/configuration/dotenv.md | 4 +- docs/install/configuration/free_ai_apis.md | 2 + docs/install/configuration/litellm.md | 2 + librechat.example.yaml | 13 ++++- 5 files changed, 79 insertions(+), 5 deletions(-) diff --git a/docs/install/configuration/custom_config.md b/docs/install/configuration/custom_config.md index f6a8ac10eeb..b3875ab2596 100644 --- a/docs/install/configuration/custom_config.md +++ b/docs/install/configuration/custom_config.md @@ -48,11 +48,13 @@ Each endpoint in the `custom` array 
should have the following structure: - Type: String (apiKey | `"user_provided"`) - **Example**: `apiKey: "${MISTRAL_API_KEY}"` | `apiKey: "your_api_key"` | `apiKey: "user_provided"` - **Required** + - **Note**: It's highly recommended to use the env. variable reference for this field, i.e. `${YOUR_VARIABLE}` - **baseURL**: Base URL for the API. Can reference an environment variable, or allow user to provide the value. - Type: String (baseURL | `"user_provided"`) - **Example**: `baseURL: "https://api.mistral.ai/v1"` | `baseURL: "${MISTRAL_BASE_URL}"` | `baseURL: "user_provided"` - **Required** + - **Note**: It's highly recommended to use the env. variable reference for this field, i.e. `${YOUR_VARIABLE}` - **iconURL**: The URL to use as the Endpoint Icon. - Type: String @@ -89,6 +91,7 @@ Each endpoint in the `custom` array should have the following structure: - **summarize**: Enables summarization when set to `true`. - Type: Boolean - Example: `summarize: false` + - **Note**: This feature requires an OpenAI Functions compatible API. - **summaryModel**: Specifies the model to use if summarization is enabled. - Type: String @@ -100,17 +103,68 @@ Each endpoint in the `custom` array should have the following structure: - Example: `forcePrompt: false` - **Note**: This combines all messages into a single text payload, following the OpenAI format. -- **modelDisplayLabel**: The label displayed in messages for the current AI model. +- **modelDisplayLabel**: The label displayed in messages next to the Icon for the current AI model. - Type: String - Example: `modelDisplayLabel: "Mistral"` - **Note**: The display order is: - 1. Custom name set via preset (if available) - 2. Label derived from the model name (if applicable) - - 3. This value, if the above are not specified. Default is "AI" when not set. + - 3. This value, `modelDisplayLabel`, is used if the above are not specified. Defaults to "AI". + +- **addParams**: Adds additional parameters to requests. + - Type: Object/Dictionary + - **Description**: Adds/Overrides parameters. Useful for specifying API-specific options. + - **Example**: +```yaml + addParams: + safe_mode: true +``` + +- **dropParams**: Removes default parameters from requests. + - Type: Array of Strings + - **Description**: Excludes specified default parameters. Useful for APIs that do not accept or recognize certain parameters. + - **Example**: `dropParams: ["stop", "temperature", "top_p"]` + - **Note**: For a list of default parameters sent with every request, see the "Default Parameters" Section below.
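Taken together, the `addParams`/`dropParams` options documented above are applied by the `OpenAIClient` change in PATCH 50: `addParams` is spread over the default options first, then every `dropParams` entry is deleted. A condensed restatement of that merge order:

```js
// Condensed restatement of the addParams/dropParams handling that PATCH 50
// adds to OpenAIClient: added params override the defaults, then dropped
// params are deleted from the final options object.
function applyCustomParams(modelOptions, { addParams, dropParams } = {}) {
  let options = { ...modelOptions };
  if (addParams && typeof addParams === 'object') {
    options = { ...options, ...addParams };
  }
  if (Array.isArray(dropParams)) {
    dropParams.forEach((param) => delete options[param]);
  }
  return options;
}

// applyCustomParams({ temperature: 1, top_p: 1 }, { addParams: { safe_mode: true }, dropParams: ['top_p'] })
// -> { temperature: 1, safe_mode: true }
```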
## Additional Notes - Ensure that all URLs and keys are correctly specified to avoid connectivity issues. -- Version compatibility should be checked to ensure smooth operation. + +## Default Parameters + +Custom endpoints share logic with the OpenAI endpoint, and thus have default parameters tailored to the OpenAI API. + +```json +{ + "model": "your-selected-model", + "temperature": 1, + "top_p": 1, + "presence_penalty": 0, + "frequency_penalty": 0, + "stop": [ + "||>", + "\nUser:", + "<|diff_marker|>" + ], + "user": "LibreChat_User_ID", + "stream": true, + "messages": [ + { + "role": "user", + "content": "hi how are you" + } + ] +} +``` +### Breakdown +- `model`: The selected model from the list of models. +- `temperature`: Defaults to `1` if not provided via preset. +- `top_p`: Defaults to `1` if not provided via preset. +- `presence_penalty`: Defaults to `0` if not provided via preset. +- `frequency_penalty`: Defaults to `0` if not provided via preset. +- `stop`: Sequences where the AI will stop generating further tokens. By default, uses the start token (`||>`), the user label (`\nUser:`), and end token (`<|diff_marker|>`). Up to 4 sequences can be provided to the [OpenAI API](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stop). +- `user`: A unique identifier representing your end-user, which can help OpenAI to [monitor and detect abuse](https://platform.openai.com/docs/api-reference/chat/create#chat-create-user). +- `stream`: If set, partial message deltas will be sent, like in ChatGPT. Otherwise, generation will only be available when completed. +- `messages`: [OpenAI format for messages](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages); the `name` field is added to messages with `system` and `assistant` roles when a prompt prefix (custom name) is specified via preset. ## Example Config ```yaml endpoints: custom: # Mistral AI API - name: "Mistral" apiKey: "${MISTRAL_API_KEY}" baseURL: "https://api.mistral.ai/v1" models: default: ["mistral-tiny", "mistral-small", "mistral-medium"] titleConvo: true titleModel: "mistral-tiny" summarize: false summaryModel: "mistral-tiny" forcePrompt: false modelDisplayLabel: "Mistral" + addParams: + safe_mode: true + dropParams: ["stop", "temperature", "top_p"] # OpenRouter.ai API - name: "OpenRouter"
diff --git a/docs/install/configuration/dotenv.md b/docs/install/configuration/dotenv.md index 5eda7ba3f7b..a384175b32c 100644 --- a/docs/install/configuration/dotenv.md +++ b/docs/install/configuration/dotenv.md @@ -302,12 +302,14 @@ OPENAI_TITLE_MODEL=gpt-3.5-turbo OPENAI_SUMMARIZE=true ``` -> **Not yet implemented**: this will be a conversation option enabled by default to save users on tokens. We are using the ConversationSummaryBufferMemory method to summarize messages. To learn more about this, see this article: [https://www.pinecone.io/learn/series/langchain/langchain-conversational-memory/](https://www.pinecone.io/learn/series/langchain/langchain-conversational-memory/) +> **Experimental**: We are using the ConversationSummaryBufferMemory method to summarize messages. To learn more about this, see this article: [https://www.pinecone.io/learn/series/langchain/langchain-conversational-memory/](https://www.pinecone.io/learn/series/langchain/langchain-conversational-memory/) - Reverse proxy settings for OpenAI: - see: [LiteLLM](./litellm.md) - see also: [Free AI APIs](./free_ai_apis.md#nagaai) +**Important**: As of v0.6.6, it's recommended you use the `librechat.yaml` [Configuration file (guide here)](./custom_config.md) to add Reverse Proxies as separate endpoints. + ```bash OPENAI_REVERSE_PROXY= ```
diff --git a/docs/install/configuration/free_ai_apis.md b/docs/install/configuration/free_ai_apis.md index 671ed9b6206..bc3542dd037 100644 --- a/docs/install/configuration/free_ai_apis.md +++ b/docs/install/configuration/free_ai_apis.md @@ -34,6 +34,8 @@ OPENAI_REVERSE_PROXY=https://api.naga.ac/v1/chat/completions # OPENAI_MODELS=gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,text-davinci-003,gpt-4,gpt-4-0314,gpt-4-0613 ``` +**Important**: As of v0.6.6, it's recommended you use the `librechat.yaml` [Configuration file (guide here)](./custom_config.md) to add Reverse Proxies as separate endpoints. + **Note:** The `OPENAI_MODELS` variable is commented out so that the server can fetch nagaai/api/v1/models for all available models. Uncomment and adjust if you wish to specify which exact models you want to use.
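An aside on the `${MISTRAL_API_KEY}`-style values used throughout these config examples: they are resolved server-side by `extractEnvVariable` (imported in `loadConfigModels` and the custom `initializeClient` above). Its implementation is not part of these patches, so the stand-in below is an assumption that simply matches the documented behavior:

```js
// Stand-in for extractEnvVariable from ~/server/utils; the real implementation
// is not shown in these patches, so treat this as a sketch of the documented
// behavior: `${VAR}` references resolve from process.env, and everything else
// (including the literal "user_provided") passes through unchanged.
function extractEnvVariable(value) {
  const match = typeof value === 'string' ? value.match(/^\$\{(\w+)\}$/) : null;
  if (!match) {
    return value;
  }
  return process.env[match[1]] ?? value;
}

console.log(extractEnvVariable('${MISTRAL_API_KEY}')); // resolved from the environment
console.log(extractEnvVariable('user_provided')); // 'user_provided'
```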
It's worth noting that not all models listed by their API will work, with or without this project. The exact URL may also change, just make sure you include `/v1/chat/completions` in the reverse proxy URL if it ever changes.
diff --git a/docs/install/configuration/litellm.md b/docs/install/configuration/litellm.md index c9f86368993..d4dfdd6db96 100644 --- a/docs/install/configuration/litellm.md +++ b/docs/install/configuration/litellm.md @@ -62,6 +62,8 @@ git clone https://github.com/danny-avila/LibreChat.git OPENAI_REVERSE_PROXY=http://host.docker.internal:8000/v1/chat/completions ``` +**Important**: As of v0.6.6, it's recommended you use the `librechat.yaml` [Configuration file (guide here)](./custom_config.md) to add Reverse Proxies as separate endpoints. + #### 3. Save fake OpenAI key in LibreChat's `.env` Copy LibreChat's `.env.example` to `.env` and overwrite the default OPENAI_API_KEY (by default it requires the user to pass a key).
diff --git a/librechat.example.yaml b/librechat.example.yaml index ad2d349e422..b2a9bbaf934 100644 --- a/librechat.example.yaml +++ b/librechat.example.yaml @@ -41,9 +41,20 @@ endpoints: # Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`. forcePrompt: false - # modelDisplayLabel: The label displayed for the AI model. + # The label displayed for the AI model in messages. modelDisplayLabel: "Mistral" # Default is "AI" when not set. + # Add additional parameters to the request. Default params will be overwritten. + addParams: + safe_mode: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/ + + # Drop default parameters from the request. See default params in guide linked below. + dropParams: ["stop", "temperature", "top_p"] + # - stop # dropped since it's not recognized by Mistral AI API + # `temperature` and `top_p` are removed to allow Mistral AI API defaults to be used: + # - temperature + # - top_p + # OpenRouter.ai Example - name: "OpenRouter" # For `apiKey` and `baseURL`, you can use environment variables that you define.
From facd6f9855e96a7450980e4241f023a61f34ecbe Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Tue, 2 Jan 2024 19:18:55 -0500 Subject: [PATCH 52/59] docs: update docs with additional notes --- docs/install/configuration/custom_config.md | 4 ++-- docs/install/configuration/index.md | 2 +- docs/install/index.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/docs/install/configuration/custom_config.md b/docs/install/configuration/custom_config.md index b3875ab2596..8066d8305e4 100644 --- a/docs/install/configuration/custom_config.md +++ b/docs/install/configuration/custom_config.md @@ -101,7 +101,7 @@ Each endpoint in the `custom` array should have the following structure: - **forcePrompt**: If `true`, sends a `prompt` parameter instead of `messages`. - Type: Boolean - Example: `forcePrompt: false` - - **Note**: This combines all messages into a single text payload, following the OpenAI format. + - **Note**: This combines all messages into a single text payload, [following OpenAI format](https://github.com/pvicente/openai-python/blob/main/chatml.md), and uses the `/completions` endpoint of your baseURL rather than `/chat/completions`. - **modelDisplayLabel**: The label displayed in messages next to the Icon for the current AI model. - Type: String - Example: `modelDisplayLabel: "Mistral"` - **Note**: The display order is: - 1. Custom name set via preset (if available) - 2. Label derived from the model name (if applicable) - 3. This value, `modelDisplayLabel`, is used if the above are not specified. Defaults to "AI". - **addParams**: Adds additional parameters to requests. - Type: Object/Dictionary - **Description**: Adds/Overrides parameters. Useful for specifying API-specific options. - **Example**: ```yaml addParams: safe_mode: true ``` - **dropParams**: Removes default parameters from requests. - - Type: Array of Strings + - Type: Array/List of Strings - **Description**: Excludes specified default parameters. Useful for APIs that do not accept or recognize certain parameters. - **Example**: `dropParams: ["stop", "temperature", "top_p"]` - **Note**: For a list of default parameters sent with every request, see the "Default Parameters" Section below.
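The updated `forcePrompt` note above distinguishes two request shapes. As a hedged illustration (the exact prompt string LibreChat generates is not shown in this patch), the difference looks like this:

```js
// Hedged illustration of the forcePrompt note above; the prompt text below is
// made up for the example and is not LibreChat's actual formatting.
const chatCompletionsBody = {
  model: 'mistral-tiny',
  messages: [{ role: 'user', content: 'hi how are you' }],
}; // default: POSTed to `${baseURL}/chat/completions`

const completionsBody = {
  model: 'mistral-tiny',
  prompt: '||>User:\nhi how are you\n||>Assistant:\n',
}; // forcePrompt: true -- POSTed to `${baseURL}/completions`
```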
diff --git a/docs/install/configuration/index.md b/docs/install/configuration/index.md index 4fae69ea39b..502686a3ec6 100644 --- a/docs/install/configuration/index.md +++ b/docs/install/configuration/index.md @@ -7,7 +7,7 @@ weight: 2 # Configuration * ⚙️ [Environment Variables](./dotenv.md) - * 🖥️ [Custom Configurations](./configuration/custom_config.md) + * 🖥️ [Custom Config & Endpoints](./configuration/custom_config.md) * 🐋 [Docker Compose Override](./docker_override.md) --- * 🤖 [AI Setup](./ai_setup.md)
diff --git a/docs/install/index.md b/docs/install/index.md index 6a9f72084bd..01786ee3f75 100644 --- a/docs/install/index.md +++ b/docs/install/index.md @@ -17,7 +17,7 @@ weight: 1 ## **[Configuration](./configuration/index.md)** * ⚙️ [Environment Variables](./configuration/dotenv.md) - * 🖥️ [Custom Configurations](./configuration/custom_config.md) + * 🖥️ [Custom Config & Endpoints](./configuration/custom_config.md) * 🐋 [Docker Compose Override](./configuration/docker_override.md) * 🤖 [AI Setup](./configuration/ai_setup.md) * 🚅 [LiteLLM](./configuration/litellm.md)
From 8d59455acc4de58515703960dc3938fddb316775 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Wed, 3 Jan 2024 08:58:41 -0500 Subject: [PATCH 53/59] feat(maxTokensMap): add mistral models (32k context) --- api/utils/tokens.js | 1 + 1 file changed, 1 insertion(+)
diff --git a/api/utils/tokens.js b/api/utils/tokens.js index 45f794c70e4..b6aa7ba5888 100644 --- a/api/utils/tokens.js +++ b/api/utils/tokens.js @@ -52,6 +52,7 @@ const openAIModels = { 'gpt-3.5-turbo-16k-0613': 15999, 'gpt-3.5-turbo-1106': 16380, // -5 from max 'gpt-4-1106': 127995, // -5 from max + 'mistral-': 31995, // -5 from max }; // Order is important here: by model series and context size (gpt-4 then gpt-3, ascending)
From 80ab83406d900a87040c3017855929ce2ecfa463 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Wed, 3 Jan 2024 09:06:21 -0500 Subject: [PATCH 54/59] docs: update openrouter notes --- docs/install/configuration/ai_setup.md | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/docs/install/configuration/ai_setup.md b/docs/install/configuration/ai_setup.md index 4743b2b73e3..ac37ec31013 100644 --- a/docs/install/configuration/ai_setup.md +++ b/docs/install/configuration/ai_setup.md @@ -330,15 +330,19 @@ To use Azure with the Plugins endpoint, make sure the following environment vari > See their available models and pricing here: **[Supported Models](https://openrouter.ai/docs#models)** -OpenRouter is so great, I decided to integrate it to the project as a standalone feature. +OpenRouter is integrated into LibreChat by overriding the OpenAI endpoint. -**Setup:** +**Important**: As of v0.6.6, you can use OpenRouter as its own standalone endpoint as shown below + +### [Review the Custom Config Guide (click here)](./custom_config.md) to add an `OpenRouter` Endpoint + +**Setup (legacy):** - Sign up to **[OpenRouter](https://openrouter.ai/)** and create a key. You should name it and set a limit as well. - Set the environment variable `OPENROUTER_API_KEY` in your .env file to the key you just created.
- Set something in the `OPENAI_API_KEY`, it can be anything, but **do not** leave it blank or set to `user_provided` - Restart your LibreChat server and use the OpenAI or Plugins endpoints. **Notes:** - [TODO] **In the future, you will be able to set up OpenRouter from the frontend as well.** - This will override the official OpenAI API or your reverse proxy settings for both Plugins and OpenAI. - On initial setup, you may need to refresh your page twice to see all their supported models populate automatically.
From 3a3d249d7eeb191ed9094fc460265391e5680bfe Mon Sep 17 00:00:00 2001 From: Danny Avila <110412045+danny-avila@users.noreply.github.com> Date: Wed, 3 Jan 2024 09:08:12 -0500 Subject: [PATCH 55/59] Update ai_setup.md --- docs/install/configuration/ai_setup.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/docs/install/configuration/ai_setup.md b/docs/install/configuration/ai_setup.md index ac37ec31013..a1096d702f9 100644 --- a/docs/install/configuration/ai_setup.md +++ b/docs/install/configuration/ai_setup.md @@ -332,7 +332,9 @@ To use Azure with the Plugins endpoint, make sure the following environment vari OpenRouter is integrated into LibreChat by overriding the OpenAI endpoint. -**Important**: As of v0.6.6, you can use OpenRouter as its own standalone endpoint as shown below +**Important**: As of v0.6.6, you can use OpenRouter as its own standalone endpoint: + +![image](https://github.com/danny-avila/LibreChat/assets/110412045/4955bfa3-7b6b-4602-933f-daef89c9eab3) ### [Review the Custom Config Guide (click here)](./custom_config.md) to add an `OpenRouter` Endpoint
From 1344c7908697e1fccf311ea9f5c6669b7838dbb2 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Wed, 3 Jan 2024 09:12:19 -0500 Subject: [PATCH 56/59] docs(custom_config): add table of contents and fix note about custom name --- docs/install/configuration/custom_config.md | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/docs/install/configuration/custom_config.md b/docs/install/configuration/custom_config.md index 8066d8305e4..ccd641f9bc0 100644 --- a/docs/install/configuration/custom_config.md +++ b/docs/install/configuration/custom_config.md @@ -1,3 +1,16 @@ +# Table of Contents + +1. [LibreChat Configuration Guide](#librechat-configuration-guide) + - [Configuration Overview](#configuration-overview) + - [1. Version](#1-version) + - [2. Cache Settings](#2-cache-settings) + - [3. Endpoints](#3-endpoints) + - [Endpoint Object Structure](#endpoint-object-structure) + - [Additional Notes](#additional-notes) + - [Default Parameters](#default-parameters) + - [Breakdown](#breakdown) + - [Example Config](#example-config) + # LibreChat Configuration Guide This document provides detailed instructions for configuring the `librechat.yaml` file used by LibreChat. @@ -164,7 +177,7 @@ Custom endpoints share logic with the OpenAI endpoint, and thus have default par - `stop`: Sequences where the AI will stop generating further tokens. By default, uses the start token (`||>`), the user label (`\nUser:`), and end token (`<|diff_marker|>`). Up to 4 sequences can be provided to the [OpenAI API](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stop). - `user`: A unique identifier representing your end-user, which can help OpenAI to [monitor and detect abuse](https://platform.openai.com/docs/api-reference/chat/create#chat-create-user). - `stream`: If set, partial message deltas will be sent, like in ChatGPT.
Otherwise, generation will only be available when completed. -- `messages`: [OpenAI format for messages](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages); the `name` field is added to messages with `system` and `assistant` roles when a prompt prefix (custom name) is specified via preset. +- `messages`: [OpenAI format for messages](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages); the `name` field is added to messages with `system` and `assistant` roles when a custom name is specified via preset. ## Example Config
From 5cac29fe938fcf9ba95fc51e2ef98be6dfd63e85 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Wed, 3 Jan 2024 09:14:38 -0500 Subject: [PATCH 57/59] docs(custom_config): reorder ToC --- docs/install/configuration/custom_config.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/docs/install/configuration/custom_config.md b/docs/install/configuration/custom_config.md index ccd641f9bc0..9c2b4ee4fcf 100644 --- a/docs/install/configuration/custom_config.md +++ b/docs/install/configuration/custom_config.md @@ -1,6 +1,14 @@ +# LibreChat Configuration Guide + +This document provides detailed instructions for configuring the `librechat.yaml` file used by LibreChat. + +In future updates, some of the configurations from [your `.env` file](./dotenv.md) will migrate here. + +Further customization of the current configurations is also planned. + # Table of Contents 1. [Intro](#librechat-configuration-guide) - [Configuration Overview](#configuration-overview) @@ -11,14 +19,6 @@ - [Breakdown](#breakdown) - [Example Config](#example-config) -# LibreChat Configuration Guide - -This document provides detailed instructions for configuring the `librechat.yaml` file used by LibreChat. - -In future updates, some of the configurations from [your `.env` file](./dotenv.md) will migrate here. - -Further customization of the current configurations are also planned. - ## Configuration Overview
From 87688851e48176e22bedb0696d628304602cbaef Mon Sep 17 00:00:00 2001 From: Danny Avila <110412045+danny-avila@users.noreply.github.com> Date: Wed, 3 Jan 2024 09:16:21 -0500 Subject: [PATCH 58/59] Update custom_config.md --- docs/install/configuration/custom_config.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/install/configuration/custom_config.md b/docs/install/configuration/custom_config.md index 215da7b8761..9c2b4ee4fcf 100644 --- a/docs/install/configuration/custom_config.md +++ b/docs/install/configuration/custom_config.md @@ -16,7 +16,7 @@ Further customization of the current configurations is also planned. - [Endpoint Object Structure](#endpoint-object-structure) - [Additional Notes](#additional-notes) - [Default Parameters](#default-parameters) - - [Breakdown](#breakdown) + - [Breakdown of Default Params](#breakdown-of-default-params) - [Example Config](#example-config) ## Configuration Overview @@ -168,7 +168,7 @@ Custom endpoints share logic with the OpenAI endpoint, and thus have default par ] } ``` -### Breakdown +### Breakdown of Default Params - `model`: The selected model from the list of models.
- `temperature`: Defaults to `1` if not provided via preset. - `top_p`: Defaults to `1` if not provided via preset. - `presence_penalty`: Defaults to `0` if not provided via preset. - `frequency_penalty`: Defaults to `0` if not provided via preset. - `stop`: Sequences where the AI will stop generating further tokens. By default, uses the start token (`||>`), the user label (`\nUser:`), and end token (`<|diff_marker|>`). Up to 4 sequences can be provided to the [OpenAI API](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stop). - `user`: A unique identifier representing your end-user, which can help OpenAI to [monitor and detect abuse](https://platform.openai.com/docs/api-reference/chat/create#chat-create-user). - `stream`: If set, partial message deltas will be sent, like in ChatGPT. Otherwise, generation will only be available when completed. - `messages`: [OpenAI format for messages](https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages); the `name` field is added to messages with `system` and `assistant` roles when a custom name is specified via preset. +**Note:** By default, the `max_tokens` field is not sent, which uses the maximum amount of tokens available (the default OpenAI API behavior). Some alternate APIs require this field, or may otherwise default to a very low value, causing responses to appear cut off; in this case, you should add it to the `addParams` field as shown in the [Endpoint Object Structure](#endpoint-object-structure). + ## Example Config ```yaml
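# Sketch -- not part of the original example config. Per the max_tokens note
# above, an API that requires the field can receive it through addParams;
# the endpoint name, key variable, and 1024 value here are all illustrative.
endpoints:
  custom:
    - name: "ExampleAPI"
      apiKey: "${EXAMPLE_API_KEY}"
      baseURL: "https://example.com/v1"
      models:
        default: ["example-model"]
      addParams:
        max_tokens: 1024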