From 1cf827211f27a03049f22ad368cccec51c038098 Mon Sep 17 00:00:00 2001
From: paychex-ssmithrand
<108530706+paychex-ssmithrand@users.noreply.github.com>
Date: Thu, 11 Apr 2024 09:18:26 -0400
Subject: [PATCH] Downstream (#12)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* 🚀 feat: Support for GPT-4 Turbo/0125 Models (#1643)
* 🧹 Clean Up OpenAI Config and Show 'Set Azure Key' for Plugins (#1649)
* refactor(gptPlugins): prevent edge case where exact word `azure` could be found in azure api Key detection when not an azure key
* refactor(SetKeyDialog): cleanup OpenAI config, show 'set azure key' when `PLUGINS_USE_AZURE` env var is enabled
* 🐞 fix: Bump `@langchain/google-genai` to Address Chinese Text Bug (#1654)
* 📋 feat: Log Custom Config File and Add Known Model Limits to Custom Endpoint (#1657)
* refactor(custom): add all recognized models to maxTokensMap for custom endpoint
* feat(librechat.yaml): log the custom config file on initial load
* fix(OpenAIClient): pass endpointType/endpoint to `getModelMaxTokens` call
* 👤 fix: Avatar Check in User Auth (#1677)
* 🌍 : Update Japanese translation (#1666)
* Language translation: japanese
* Language translation: japanese
* 🔝fix: Re-order System Message to Top for Mistral API Payloads (#1678)
* fix: re-order System Message if Mistral AI API as it only allows System Message at start of Payload
* fix: re-introduce singular system message change role to `user` if `system`
* 🛡️ : Security Enhancements (#1681)
* fix: sanitize HTTP params and do not send whole error objects back
* fix: prevent path traversal
* fix: send custom error message for tokenizer route
* chore: handle info exposure vector
* chore(oauth): skip check due to false positive as oauth routes are rate-limited
* chore(app): disable `x-powered-by`
* chore: disable false positives or flagging of hardcoded secrets when they are fake values
* chore: add path traversal safety check
* 📝 docs update: remove ChatGPTbrowser and other small fixes (#1686)
* 🧼 docs: remove references to ChatGPTbrowser and PandoraNext
* docs: clean up .env file
Update OpenAI models with the list of automatically fetched models, update Plugin models with the list of models supporting functions, comment out ToC in custom_config.md since it conflicts with mkdocs right sidebar ToC
* 🖋️ docs: fix formatting in linux_install.md
* docs: update example model lists in dotenv.md
* docs: update breaking_changes.md
* 🤖 docs: add `copilot-gpt4-service` AI setup info (#1695)
Adds information and setup details for [aaamoon's copilot-gpt4-service](https://github.com/aaamoon/copilot-gpt4-service) to Unofficial APIs section of the documentation.
Utilizes GitHub's Copilot to access the OpenAI API.
* 🥷 docs: Ninja - ChatGPT-browser reverse proxy (#1697)
* 🥷 docs: Ninja ChatGPT-browser reverse proxy
* 🥷 docs: breaking changes
* 🌍 : Update German Translations (#1683)
Co-authored-by: marlonka
* 🪙 feat: Use OpenRouter Model Data for Token Cost and Context (#1703)
* feat: use openrouter data for model token cost/context
* chore: add ttl for tokenConfig and refetch models if cache expired
* 🚀 feat: Support for GPT-3.5 Turbo/0125 Model (#1704)
* 🚀 feat: Support for GPT-3.5 Turbo/0125 Model
* ci: fix tx test
* 📷 fix: Pass Base64 to Gemini Vision Payload when using CDN URLs (#1705)
* 🖌️feat: ScrolltoBottom & Mobile Improvements; Translation Update (#1651)
* 🖌️feat: Scrolltobottom Style
* 🖌️feat: ScrolltoBottom Style
* 📱Settings tab now centered on mobile / selection bug fixed 🐞, 🌍 Updated Translation
* 🛠️fix: Adjust the width of the settings dialog and address the issue of not seeing selection on the desktop.
* 🎨 Update settings tabs background color for dark mode.
Adjusts background color dynamically based on screen size.
* 🛠️fix: Reverted changes in ScrolltoBottom file
* 🪙 fix(getModelMaxTokens): Retrieve Correct Context Tokens for Azure OpenAI (#1710)
* 🖊️ README.md: update button layout (#1709)
change size and position of the one click deployment buttons
* ♾️ style: Infinite Scroll Nav and Sort Convos by Date/Usage (#1708)
* Style: Infinite Scroll and Group convos by date
* Style: Infinite Scroll and Group convos by date- Redesign NavBar
* Style: Infinite Scroll and Group convos by date- Redesign NavBar - Clean code
* Style: Infinite Scroll and Group convos by date- Redesign NavBar - Redesign NewChat Component
* Style: Infinite Scroll and Group convos by date- Redesign NavBar - Redesign NewChat Component
* Style: Infinite Scroll and Group convos by date- Redesign NavBar - Redesign NewChat Component
* Including OpenRouter and Mistral icon
* refactor(Conversations): cleanup use of utility functions and typing
* refactor(Nav/NewChat): use localStorage `lastConversationSetup` to determine the endpoint to use, as well as icons -> JSX components, remove use of `endpointSelected`
* refactor: remove use of `isFirstToday`
* refactor(Nav): remove use of `endpointSelected`, consolidate scrolling logic to its own hook `useNavScrolling`, remove use of recoil `conversation`
* refactor: Add spinner to bottom of list, throttle fetching, move query hooks to client workspace
* chore: sort by `updatedAt` field
* refactor: optimize conversation infinite query, use optimistic updates, add conversation helpers for managing pagination, remove unnecessary operations
* feat: gen_title route for generating the title for the conversation
* style(Convo): change hover bg-color
* refactor: memoize groupedConversations and return as array of tuples, correctly update convos pre/post message stream, only call genTitle if conversation is new, make `addConversation` dynamically either add/update depending if convo exists in pages already, reorganize type definitions
* style: rename Header NewChat Button -> HeaderNewChat, add NewChatIcon, closely match main Nav New Chat button to ChatGPT
* style(NewChat): add hover bg color
* style: cleanup comments, match ChatGPT nav styling, redesign search bar, make part of new chat sticky header, move Nav under same parent as outlet/mobilenav, remove legacy code, search only if searchQuery is not empty
* feat: add tests for conversation helpers and ensure no duplicate conversations are ever grouped
* style: hover bg-color
* feat: alt-click on convo item to open conversation in new tab
* chore: send error message when `gen_title` fails
---------
Co-authored-by: Walber Cardoso
* 🔧 fix: Patch incorrect Package Installation (#1720)
* 🐳 fix: Update `.devcontainer` Files (#1712)
* fix: modify the base docker image for devcontainer
* fix: restore package-lock.json from main
* 📱 style: Settings UI Enhancements for Mobile (#1721)
* Fix the interface for the mobile version.
* Make uniform margins for buttons.
* 🐳 docs: How to Authenticate MongoDB (#1724)
* refactor: remove `--noauth` flag from `mongod` command
* docs: add mongodb auth instructions
* Update manage_your_database.md
* chore: add example
* Update manage_your_database.md
* 🐳 docs: Formatting Fix (#1725)
* 🐳 docs: Add deploy to Zeabur button and guide (#1727)
* 🐳 docs: Add deploy to Zeabur button in README.md
* 🐳 docs: Add deploy to Zeabur guide in docs
* 🖋️ docs: Formatting Fix (#1726)
* 🍃 docs: Formatting Fix
* 🖋️ docs: Formatting Fix
* 🖋️ docs: Formatting Fix
* 🔒✉️ feat: allow only certain domain (#1562)
* feat: allow only certain domain
* Update dotenv.md
* refactor(registrationController) & handle ALLOWED_REGISTRATION_DOMAINS not specified
* cleanup and moved to AuthService for better error handling
* refactor: replace environment variable with librechat config item, add typedef for custom config, update docs for new registration object and allowedDomains values
* ci(AuthService): test for `isDomainAllowed`
---------
Co-authored-by: Danny Avila
* 🔄🔐 refactor: auth; style: match OpenAI; feat: custom social login order (#1421)
* refactor(Login & Registration)
* fix(Registration) test errors
* refactor(LoginForm & ResetPassword)
* fix(LoginForm): display 'undefined' when loading page; style(SocialButton): match OpenAI's graphics
* some refactor and style update for social logins
* style: width like OpenAI; feat: custom social login order; refactor: alphabetical socials
* fix(Registration & Login) test
* Update .env.example
* Update .env.example
* Update dotenv.md
* refactor: remove `SOCIAL_LOGIN_ORDER` for `socialLogins` configured from `librechat.yaml`
- initialized by AppService, attached as app.locals property
- rename socialLoginOrder and loginOrder to socialLogins app-wide for consistency
- update types and docs
- initialize config variable as array and not singular string to parse
- bump data-provider to 0.3.9
---------
Co-authored-by: Danny Avila
* 🔧 fix: socialLogins default value (#1730)
* fix: socialLogins default value
* ci: add test for `AppService`
* 📇 refactor(convoSchema): index `updatedAt` field (#1732)
* 📖 docs: fix link pointing to dotenv guide (#1739)
* ✏️ update dotenv.md (#1740)
update the note about rebuilding LibreChat after configuration changes since the .env file is now mounted into the volume
* 🛠️ chore: Refactor Update Script to Utilize Docker Compose v2 (#1752)
* 👤 feat: User ID in Model Query; chore: cleanup ModelService (#1753)
* feat: send the LibreChat user ID as a query param when fetching the list of models
* chore: update bun
* chore: change bun command for building data-provider
* refactor: prefer use of `getCustomConfig` to access custom config, also move to `server/services/Config`
* refactor: make endpoints/custom option for the config optional, add userIdQuery, and use modelQueries log store in ModelService
* refactor(ModelService): use env variables at runtime, use default models from data-provider, and add tests
* docs: add `userIdQuery`
* fix(ci): import changed
* 🦙 docs: Ollama Docs Update (#1756)
* Update to use docker-compose.override.yml
Add GPU Acceleration links
* Update litellm.md
* 🧩 feat: Support Alternate API Keys for Plugins (#1760)
* refactor(DALL-E): retrieve env variables at runtime and not from memory
* feat(plugins): add alternate env variable handling to allow setting one api key for multiple plugins
* docs: update docs
* 🐳 feat: Push Container Images to DockerHub (#1762)
* ⬤ style: Circular Streaming Cursor (#1736)
* Updated Style Cursor like ChatGPT
* style(Markdown.tsx): add space before cursor when there is text
* fix: revert OpenAIClient.tokens.js change
* fix:(Markdown.tsx): revert change of unused file
* fix(convos.spec.ts): test fix
* chore: remove raw HTML for cursor animations
---------
Co-authored-by: Danny Avila
Co-authored-by: Danny Avila
* 🔌 fix: Minor Plugins Improvements (#1766)
* fix(PluginsClient): don't invoke `getFunctionModelName` when using Azure OpenAI
* fix: plugins styling fix with new cursor
* ci(PluginsClient): test azure exception for getFunctionModelName
* 🧪 ci: Fix Conversation Grouping Tests
* 🖌️ style: Update conversation history groups (#1770)
* style: Add month groups to conversation history
* style: Change "Last x days" to "Previous x days" to match ChatGPT
* style: Add "Yesterday" to conversation groups to match ChatGPT
* fix: use startOfDay for Yesterday conversation group
* fix: Output month name instead of number in conversation group name
* test: Validate new conversation groups are created properly
* fix: Formatting of month category string was wrong
* 🎉 happy birthday LibreChat (#1768)
* happy birthday LibreChat
* Refactor endpoint condition in Landing component
* Update birthday message in Eng.tsx
* fix(/config): avoid nesting ternaries
* refactor(/config): check birthday
---------
Co-authored-by: Danny Avila
* 🐳 docs: update to compose v2 (#1767)
* ✔️ docs: update custom_config.md (#1774)
Add link to https://yamlchecker.com/
* 📝 docs: clarifications - override file & balance/token count (#1783)
* 📝 docs: override file clarification
* 📝 docs: override file clarification
* 📝 docs: balance & token count clarification
* ✨ Release: v0.6.9 (#1789)
* chore: Update Docker build and push actions (#1790)
* 🐋 chore: add tag for `latest` for ci/cd docker builds (#1791)
* ✨ feat: Assistants API, General File Support, Side Panel, File Explorer (#1696)
* feat: assistant name/icon in Landing & Header
* feat: assistant name in textarea placeholder, and use `Assistant` as default name
* feat: display non-image files in user messages
* fix: only render files if files.length is > 0
* refactor(config -> file-config): move file related configuration values to separate module, add excel types
* chore: spreadsheet file rendering
* fix(Landing): dark mode style for Assistant Name
* refactor: move progress incrementing to own hook, start smaller, cap near limit (1)
* refactor(useContentHandler): add empty Text part if last part was completed tool or image
* chore: add accordion trigger border styling for dark mode
* feat: Assistant Builder model selection
* chore: use Spinner when Assistant is mutating
* fix(get/assistants): return correct response object `AssistantListResponse`
* refactor(Spinner): pass size as prop
* refactor: make assistant crud mutations optimistic, add types for options
* chore: remove assistants route and view
* chore: move assistant builder components to separate directory
* feat(ContextButton): delete Assistant via context button/dialog, add localization
* refactor: conditionally show use and context menu buttons, add localization for create assistant
* feat: save side panel states to localStorage
* style(SidePanel): improve avatar menu and assistant select styling for dark mode
* refactor: make NavToggle reusable for either side (left or right), add SidePanel Toggle with ability to close it completely
* fix: resize handle and navToggle behavior
* fix(/avatar/:assistant_id): await `deleteFile` and assign unique name to uploaded image
* WIP: file UI components from PR #576
* refactor(OpenAIMinimalIcon): pass className
* feat: formatDate helper fn
* feat: DataTableColumnHeader
* feat: add row selection, formatted row values, number of rows selected
* WIP: add files to Side panel temporarily
* feat: `LB_QueueAsyncCall`: Leaky Bucket queue for external APIs, use in `processDeleteRequest`
* fix(TFile): correct `source` type with `FileSources`
* fix(useFileHandling): use `continue` instead of return when iterating multiple files, add file type to extendedFile
* chore: add generic setter type
* refactor(processDeleteRequest): settle promises to prevent rejections from processing deletions, log errors
* feat: `useFileDeletion` to reuse file deletion logic
* refactor(useFileDeletion): make `setFiles` an optional param and use object as param
* feat: useDeleteFilesFromTable
* feat: use real `files` data and add deletion action to data table
* fix(Table): make headers sticky
* feat: add dynamic filtering for columns; only show to user Host or OpenAI storage type
* style(DropdownMenu): replace `slate` with `gray`
* style(DataTable): apply dark mode themes and other misc styling
* style(Columns): add color to OpenAI Storage option
* refactor(FileContainer): make file preview reusable
* refactor(Images): make image preview reusable
* refactor(FilePreview): make file prop optional for FileIcon and FilePreview, fix relative style
* feat(Columns): add file/image previews, set a minimum size to show for file size in bytes
* WIP: File Panel with real files and formatted
* feat: open files dialog from panel
* style: file data table mobile and general column styling fixes
* refactor(api/files): return files sorted by the most recently updated
* refactor: provide fileMap through context to prevent re-selecting files to map in different areas; remove unused imports commented out in PanelColumns
* refactor(ExtendFile): make File type optional, add `attached` to prevent attached files from being deleted on remove, make Message.files a partial TFile type
* feat: attach files through file panel
* refactor(useFileHandling): move files to the start of cache list when uploaded
* refactor(useDeleteFilesMutation): delete files from cache when successfully deleted from server
* fix(FileRow): handle possible edge case of duplication due to attaching recently uploaded file
* style(SidePanel): make resize grip border transparent, remove unnecessary styling on close sidepanel button
* feat: action utilities and tests
* refactor(actions): add `ValidationResult` type and change wording for no server URL found
* refactor(actions): check for empty server URL
* fix(data-provider): revert tsconfig to fix type issue resolution
* feat(client): first pass of actions input for assistants
* refactor(FunctionSignature): change method to output object instead of string
* refactor(models/Assistant): add actions field to schema, use searchParams object for methods, and add `getAssistant`
* feat: post actions input first pass
- create new Action document
- add actions to Assistant DB document
- create /action/:assistant_id POST route
- pass more props down from PanelSwitcher, derive assistant_id from switcher
- move privacy policy to ActionInput
- reset data on input change/validation
- add `useUpdateAction`
- conform FunctionSignature type to FunctionTool
- add action, assistant doc, update hook related types
* refactor: optimize assistant/actions relationship
- pass domain in metadata as hostname and not a URL
- include domain in tool name
- add `getActions` for actions retrieval by user
- add `getAssistants` for assistant docs retrieval by user
- add `assistant_id` to Action schema
- move actions to own module as a subroute to `api/assistants`
- add `useGetActionsQuery` and `useGetAssistantDocsQuery` hooks
- fix Action type def
* feat: show assistant actions in assistant builder
* feat: switch to actions on action click, editing action styling
* fix: add Assistant state for builder panel to allow immediate selection of newly created assistants as well as retaining the current assistant when switching to a different panel within the builder
* refactor(SidePanel/NavToggle): offset less from right when SidePanel is completely collapsed
* chore: rename `processActions` -> `processRequiredActions`
* chore: rename Assistant API Action to RequiredAction
* refactor(actions): avoid nesting actual API params under generic `requestBody` to optimize LLM token usage
* fix(handleTools): avoid calling `validTool` if not defined, add optional param to skip the loading of specs, which throws an error in the context of assistants
* WIP: working first pass of toolCalls generated from openapi specs
* WIP: first pass ToolCall styling
* feat: programmatic iv encryption/decryption helpers
* fix: correct ActionAuth types/enums, and define type for AuthForm
* feat: encryption/decryption helpers for Action AuthMetadata
* refactor(getActions): remove sensitive fields from query response
* refactor(POST/actions): encrypt and remove sensitive fields from mutation response
* fix(ActionService): change ESM import to CJS
* feat: frontend auth handling for actions + optimistic update on action update/creation
* refactor(actions): use the correct variables and types for setAuth method
* refactor: POST /:assistant_id action can now handle updating an existing action, add `saved_auth_fields` to determine when user explicitly saves new auth creds. only send auth metadata if user explicitly saved fields
* refactor(createActionTool): catch errors and send back meaningful error message, add flag to `getActions` to determine whether to retrieve sensitive values or not
* refactor(ToolService): add `action` property to ToolCall PartMetadata to determine if the tool call was an action, fix parsing function name issue with actionDelimiter
* fix(ActionRequest): use URL class to correctly join endpoint parts for `execute` call
* feat: delete assistant actions
* refactor: conditionally show Available actions
* refactor: show `retrieval` and `code_interpreter` as Capabilities, swap `Switch` for `Checkbox`
* chore: remove shadow-stroke from messages
* WIP: first pass of Assistants Knowledge attachments
* refactor: remove AssistantsProvider in favor of FormProvider, fix selectedAssistant re-render bug, map Assistant file_ids to files via fileMap, initialize Knowledge component with mapped files if any exist
* fix: prevent deleting files on assistant file upload
* chore: remove console.log
* refactor(useUploadFileMutation): update files and assistants cache on upload
* chore: disable oauth option as not supported yet
* feat: cancel assistant runs
* refactor: initialize OpenAI client with helper function, resolve all related circular dependencies
* fix(DALL-E): initialization
* fix(process): openai client initialization
* fix: select an existing Assistant when the active one is deleted
* chore: allow attaching files for assistant endpoint, send back relevant OpenAI error message when uploading, deconstruct openAI initialization correctly, add `message_file` to formData when a file is attached to the message but not the assistant
* fix: add assistant_id on newConvo
* fix(initializeClient): import fix
* chore: swap setAssistant for setOption in useEffect
* fix(DALL-E): add processFileURL to loadTools call
* chore: add customConfig to debug logs
* feat: delete threads on convo delete
* chore: replace Assistants icon
* chore: remove console.dir() in `abortRun`
* feat(AssistantService): accumulate text values from run in openai.responseText
* feat: titling for assistants endpoint
* chore: move panel file components to appropriate directory, add file checks for attaching files, change icon for Attach Files
* refactor: add localizations to tools, plugins, add condition for adding/remove user plugins so tool selections don't affect this value
* chore: disable `import from url` action for now
* chore: remove textMimeTypes from default fileConfig for now
* fix: catch tool errors and send as outputs with error messages
* fix: React warning about button as descendant of button
* style: retrieval and cancelled icon
* WIP: pass isSubmitting to Parts, use InProgressCall to display cancelled tool calls correctly, show domain/function name
* fix(meilisearch): fix `postSaveHook` issue where indexing expects a mongo document, and join all text content parts for meili indexing
* ci: fix dall-e tests
* ci: fix client tests
* fix: button types in actions panel
* fix: plugin auth form persisting across tool selections
* fix(ci): update AppService spec with `loadAndFormatTools`
* fix(clearConvos): add id check earlier on
* refactor(AssistantAvatar): set previewURL dynamically when metadata.avatar changes
* feat(assistants): addTitle cache setting
* fix(useSSE): resolve rebase conflicts
* fix: delete mutation
* style(SidePanel): make grip visible on active and hover, invisible otherwise
* ci: add data-provider tests to workflow, also update eslint/tsconfig to recognize specs, and add `text/csv` to fileConfig
* fix: handle edge case where auth object is undefined, and log errors
* refactor(actions): resolve schemas, add tests for resolving refs, import specs from separate file for tests
* chore: remove comment
* fix(ActionsInput): re-render bug when initializing states with action fields
* fix(patch/assistant): filter undefined tools
* chore: add logging for errors in assistants routes
* fix(updateAssistant): map actions to functions to avoid overwriting
* fix(actions): properly handle GET paths
* fix(convos): unhandled delete thread exception
* refactor(AssistantService): pass both thread_id and conversationId when sending intermediate assistant messages, remove `mapMessagesToSteps` from AssistantService
* refactor(useSSE): replace all messages with runMessages and pass latestMessageId to abortRun; fix(checkMessageGaps): include tool calls when syncing messages
* refactor(assistants/chat): invoke `createOnTextProgress` after thread creation
* chore: add typing
* style: sidepanel styling
* style: action tool call domain styling
* feat(assistants): default models, limit retrieval to certain models, add env variables to env.example
* feat: assistants api key in EndpointService
* refactor: set assistant model to conversation on assistant switch
* refactor: set assistant model to conversation on assistant select from panel
* fix(retrieveAndProcessFile): catch attempt to download file with `assistant` purpose which is not allowed; add logging
* feat: retrieval styling, handling, and logging
* chore: rename ASSISTANTS_REVERSE_PROXY to ASSISTANTS_BASE_URL
* feat: FileContext for file metadata
* feat: context file mgmt and filtering
* style(Select): hover/rounded changes
* refactor: explicit conversation switch, endpoint dependent, through `useSelectAssistant`, which does not create new chat if current endpoint is assistant endpoint
* fix(AssistantAvatar): make empty previewURL if no avatar present
* refactor: side panel mobile styling
* style: merge tool and action section, optimize mobile styling for action/tool buttons
* fix: localStorage issues
* fix(useSelectAssistant): invoke react query hook directly in select hook as Map was not being updated in time
* style: light mode fixes
* fix: prevent sidepanel nav styling from shifting layout up
* refactor: change default layout (collapsed by default)
* style: mobile optimization of DataTable
* style: datatable
* feat: client-side hide right-side panel
* chore(useNewConvo): add partial typing for preset
* fix(useSelectAssistant): pass correct model name by using template as preset
* WIP: assistant presets
* refactor(ToolService): add native solution for `TavilySearchResults` and log tool output errors
* refactor: organize imports and use native TavilySearchResults
* fix(TavilySearchResults): stringify result
* fix(ToolCall): show tool call outputs when not an action
* chore: rename Prompt Prefix to custom instructions (in user facing text only)
* refactor(EditPresetDialog): Optimize setting title by debouncing, reset preset on dialog close to avoid state mixture
* feat: add `presetOverride` to overwrite active conversation settings when saving a Preset (relevant for client side updates only)
* feat: Assistant preset settings (client-side)
* fix(Switcher): only set assistant_id and model if current endpoint is Assistants
* feat: use `useDebouncedInput` for updating conversation settings, starting with EditPresetDialog title setting and Assistant instructions setting
* feat(Assistants): add instructions field to settings
* feat(chat/assistants): pass conversation settings to run body
* wip: begin localization and only allow actions if the assistant is created
* refactor(AssistantsPanel): knowledge localization, allow tools on creation
* feat: experimental: allow 'priming' values before assistant is created, that would normally require an assistant_id to be defined
* chore: trim console logs and make more meaningful
* chore: toast messages
* fix(ci): date test
* feat: create file when uploading Assistant Avatar
* feat: file upload rate limiting from custom config with dynamic file route initialization
* refactor: use file upload limiters on post routes only
* refactor(fileConfig): add endpoints field for endpoint specific fileconfigs, add mergeConfig function, add tests
* refactor: fileConfig route, dynamic multer instances used on all '/' and '/images' POST routes, data service and query hook
* feat: supportedMimeTypesSchema, test for array of regex
* feat: configurable file config limits
* chore: clarify assistants file knowledge prereq.
* chore(useTextarea): default to localized 'Assistant' if assistant name is empty
* feat: configurable file limits and toggle file upload per endpoint
* fix(useUploadFileMutation): prevent updating assistant.files cache if file upload is a message_file attachment
* fix(AssistantSelect): set last selected assistant only when timeout successfully runs
* refactor(queries): disable assistant queries if assistants endpoint is not enabled
* chore(Switcher): add localization
* chore: pluralize `assistant` for `EModelEndpoint` key and value
* feat: show/hide assistant UI components based on endpoint availability; librechat.yaml config for disabling builder section and setting polling/timeout intervals
* fix(compactEndpointSchemas): use EModelEndpoint for schema access
* feat(runAssistant): use configured values from `librechat.yaml` for `pollIntervalMs` and `timeout`
* fix: naming issue
* wip: revert landing
* 🎉 happy birthday LibreChat (#1768)
* happy birthday LibreChat
* Refactor endpoint condition in Landing component
* Update birthday message in Eng.tsx
* fix(/config): avoid nesting ternaries
* refactor(/config): check birthday
---------
Co-authored-by: Danny Avila
* fix: landing
* fix: landing
* fix(useMessageHelpers): hardcoded check to use EModelEndpoint instead
* fix(ci): convo test revert to main
* fix(assistants/chat): fix issue where assistant_id was being saved as model for convo
* chore: added logging, promises racing to prevent longer timeouts, explicit setting of maxRetries and timeouts, robust catching of invalid abortRun params
* refactor: use recoil state for `showStopButton` and only show for assistants endpoint after syncing conversation data
* refactor: optimize abortRun strategy using localStorage, refactor `abortConversation` to use async/await and await the result, refactor how the abortKey cache is set for runs
* fix(checkMessageGaps): assign `assistant_id` to synced messages if defined; prevents UI from showing blank assistant for cancelled messages
* refactor: re-order sequence of chat route, only allow aborting messages after run is created, cancel abortRun if there was a cancelling error (likely due to the run already being cancelled in the chat route), and add extra logging
* chore(typedefs): add httpAgent type to OpenAIClient
* refactor: use custom implementation of retrieving run with axios to allow for timing out run query
* fix(waitForRun): handle timed out run retrieval query
* refactor: update preset conditions:
- presets will retain settings when a different endpoint is selected; for existing convos, either when modular or is assistant switch
- no longer use `navigateToConvo` on preset select
* fix: temporary calculator hack as it expects string input when invoked
* fix: cancel abortRun only when cancelling error is a result of the run already being cancelled
* chore: remove use of `fileMaxSizeMB` and total counterpart (redundant)
* docs: custom config documentation update
* docs: assistants api setup and dotenv, new custom config fields
* refactor(Switcher): make Assistant switcher sticky in SidePanel
* chore(useSSE): remove console log of data and message index
* refactor(AssistantPanel): button styling and add secondary select button to bottom of panel
* refactor(OpenAIClient): allow passing conversationId to RunManager through titleConvo and initializeLLM to properly record title context tokens used in cases where conversationId was not defined by the client
* feat(assistants): token tracking for assistant runs
* chore(spendTokens): improve logging
* feat: support/exclude specific assistant Ids
* chore: add update `librechat.example.yaml`, optimize `AppService` handling, new tests for `AppService`, optimize missing/outdated config logging
* chore: mount docker logs to root of project
* chore: condense axios errors
* chore: bump vite
* chore: vite hot reload fix using latest version
* chore(getOpenAIModels): sort instruct models to the end of models list
* fix(assistants): user provided key
* fix(assistants): user provided key, invalidate more queries on revoke
---------
Co-authored-by: Marco Beretta <81851188+Berry-13@users.noreply.github.com>
* 🔧 fix(data-provider): add openapi-types dependency (#1797)
* fix(data-provider): add openapi-types dependency
* chore: Bump version to 0.4.1 in package.json
* 🤖 docs(assistants): Additional Setup & Tool Selection Info (#1798)
* 📝 docs: Enhance LibreChat Docker deployment guide (#1796)
- Revise the description for clarity and conciseness.
- Provide a direct comparison between the advanced Docker Compose deployment and the simpler `docker-compose.override.yml` setup.
- Update screenshot link for visual reference.
* ✏️ fix(Convo): Rename Title UX (#1793)
* 🤖 chore: Improve Assistants Run Logging (#1801)
* 👥 fix: Reinstate Default Social Login Values (#1811)
* fix: social logins accidentally removed default in AppService, reinstated and added test
* chore: move birthday to OTHER section and make disabled by default
* 📝 docs: Update docker-compose.override.yml.example
* fix: necessary font changes (#1818)
* fix: necessary font changes
* chore: minor bump
* 🐋 chore: Add Docker Compose Build Latest Main Image workflow (#1819)
* Update main-image-workflow.yml
* Update main-image-workflow.yml
* Update main-image-workflow.yml
* Update main-image-workflow.yml
* 🔀 fix: Endpoint Type Mismatch when Switching Conversations (#1834)
* refactor(useUpdateUserKeysMutation): only invalidate the endpoint whose key is being updated by user
* fix(assistants): await `getUserKeyExpiry` call
* chore: fix spinner loading color
* refactor(initializeClient): make known which endpoint api Key is missing
* fix: prevent an `endpointType` mismatch by making it impossible to assign when the `endpointsConfig` doesn't have a `type` defined, also prefer `getQueryData` call to useQuery in useChatHelpers
* 📝 docs: add env changes to breaking_changes.md and minor fixes (#1812)
* 📝 docs: add env changes to breaking_changes.md
* 📝 docs: replace example in docker_override.md
* 📝 docs: fix images in zeabur.md
* 🔗 chore: Add Stable Discord and Homepage Links (#1835)
* 📝 chore: Update README.md
* 📤 refactor: Utilize `intermediateReply` when `message.content` is Empty
* 🛡️ feat: Model Validation Middleware (#1841)
* refactor: add ViolationTypes enum and add new violation for illegal model requests
* feat: validateModel middleware to protect the backend against illicit requests for unlisted models
* ⬤ style: Uniform Display of Result-Streaming Cursor (#1842)
* 📋 feat: Accumulate Text Parts to Clipboard for Assistant Outputs (#1847)
* 🛠️ refactor: Model Loading and Custom Endpoint Error Handling (#1849)
* fix: handle non-assistant role ChatCompletionMessage error
* refactor(ModelController): decouple res.send from loading/caching models
* fix(custom/initializeClient): only fetch custom endpoint models if models.fetch is true
* refactor(validateModel): load models if modelsConfig is not yet cached
* docs: update on file upload rate limiting
* 🐛 fix: Preserve Default Model in Message Requests (#1857)
* fix: do not remove default model from message request
* chore: bump data-provider
* 🐋 Feat: docker pre-built image by default (#1860)
* 🐋 Feat: docker pre-built image by default
* 🐋 Feat: docker LibreChat ports from .env
* 🎨 feat: Create Avatars of Initials Locally (#1869)
* ✏️docs: add tavily to env.example and dotenv.md (#1866)
* update .env.example
add "TAVILY_API_KEY=" to .env.example
* update dotenv.md
add Tavily to dotenv.md
* 🗨️ fix: Prevent Resetting Title to 'New Chat' on Follow-Up Message (#1870)
* fix: prevent resetting title to 'New Chat' on follow up message
* chore(useSSE): remove empty line
* 🐋 docs: update breaking_changes.md (#1864)
add note about the use of the pre-built image in docker-compose.yml
* 🔀 fix: Correct Expected Behavior for Modular Chat Feature (#1871)
* 📒 docs: Add newline for list to be correctly rendered in UI (#1873)
Currently in the documentation page the bullet list is not rendered correctly. (See first paragraph on this docs page: https://docs.librechat.ai/install/configuration/litellm.html)
* 🔧 feat: optional librechat.yaml path via environment variable (#1858)
Co-authored-by: afel
* 🅰️ feat: Azure Config to Allow Different Deployments per Model (#1863)
* wip: first pass for azure endpoint schema
* refactor: azure config to return groupMap and modelConfigMap
* wip: naming and schema changes
* refactor(errorsToString): move to data-provider
* feat: rename to azureGroups, add additional tests, tests all expected outcomes, return errors
* feat(AppService): load Azure groups
* refactor(azure): use imported types, write `mapModelToAzureConfig`
* refactor: move `extractEnvVariable` to data-provider
* refactor(validateAzureGroups): throw on duplicate groups or models; feat(mapModelToAzureConfig): throw if env vars not present, add tests
* refactor(AppService): ensure each model is properly configured on startup
* refactor: deprecate azureOpenAI environment variables in favor of librechat.yaml config
* feat: use helper functions to handle and order enabled/default endpoints; initialize azureOpenAI from config file
* refactor: redefine types as well as load azureOpenAI models from config file
* chore(ci): fix test description naming
* feat(azureOpenAI): use validated model grouping for request authentication
* chore: bump data-provider following rebase
* chore: bump config file version noting significant changes
* feat: add title options and switch azure configs for titling and vision requests
* feat: enable azure plugins from config file
* fix(ci): pass tests
* chore(.env.example): mark `PLUGINS_USE_AZURE` as deprecated
* fix(fetchModels): early return if apiKey not passed
* chore: fix azure config typing
* refactor(mapModelToAzureConfig): return baseURL and headers as well as azureOptions
* feat(createLLM): use `azureOpenAIBasePath`
* feat(parsers): resolveHeaders
* refactor(extractBaseURL): handle invalid input
* feat(OpenAIClient): handle headers and baseURL for azureConfig
* fix(ci): pass `OpenAIClient` tests
* chore: extract env var for azureOpenAI group config, baseURL
* docs: azureOpenAI config setup docs
* feat: safe check of potential conflicting env vars that map to unique placeholders
* fix: reset apiKey when model switches from originally requested model (vision or title)
* chore: linting
* docs: CONFIG_PATH notes in custom_config.md
* 🖌️ style: auth dark theme (#1862)
* Remove minLength validation and update login link style
* Add theme selector component and update login form styles
* Update styling in Login and LoginForm components
* Update ResetPassword component styles and text color
* Refactor login component and add theme selector
* Add ThemeSelector component to Registration, RequestPasswordReset, and ResetPassword pages
* chore(Login.tsx): remove unused `useCallback`
* chore(Login.tsx) import order
* Update ResetPassword.tsx import order
* Update RequestPasswordReset.tsx import order
* Update Registration.tsx import order
---------
Co-authored-by: Danny Avila
* 🔥chore: bump firebase dependency (#1900)
* 🌍 : Update Portuguese Translations (#1867)
* 🌍 : Update Portuguese Translations
* 🌍 : Fix Portuguese Translations
* fix(Br): lint errors
---------
Co-authored-by: Berry-13 <81851188+Berry-13@users.noreply.github.com>
* 🅰️ feat: Azure AI Studio, Models as a Service Support (#1902)
* feat(data-provider): add Azure serverless inference handling through librechat.yaml
* feat(azureOpenAI): serverless inference handling in api
* docs: update docs with new azureOpenAI endpoint config fields and serverless inference endpoint setup
* chore: remove unnecessary checks for apiKey as schema would not allow apiKey to be undefined
* ci(azureOpenAI): update tests for serverless configurations
* 🛠️ chore: Abort AI Requests on Close & Remove Verbose Logs for Plugins (#1914)
* chore: remove verbose logging of ChatOpenAI
* feat: abort AI requests on request close
* 🤖 docs: Add Groq and other Compatible AI Endpoints (#1915)
* chore: bump bun dependencies
* feat: make `groq` a known endpoint
* docs: compatible ai endpoints
* Update ai_endpoints.md
* Update ai_endpoints.md
* 🍞 fix: Minor fixes and improved Bun support (#1916)
* fix(bun): fix bun compatibility to allow gzip header: https://github.com/oven-sh/bun/issues/267#issuecomment-1854460357
* chore: update custom config examples
* fix(OpenAIClient.chatCompletion): remove redundant call of stream.controller.abort() as `break` aborts the request and prevents abort errors when not called redundantly
* chore: bump bun.lockb
* fix: remove result-thinking class when message is no longer streaming
* fix(bun): improve Bun support by forcing use of old method in bun env, also update old methods with new customizable params
* fix(ci): pass tests
* 🔗 feat: NavLinks customization for Help & Faq URL (#1872)
* help and faq
* fix: using only one var
* revert(types.ts): showHelpAndFaq
* Update dotenv.md
* Update dotenv.md
* 🤖feat: add multiple known endpoints (#1917)
* feat: add known endpoints
* docs: add known endpoints
* update ai_endpoints.md
remove the groq icon from the example
* Update ai_endpoints.md
---------
Co-authored-by: Danny Avila
* 🔗 feat: User Provided Base URL for OpenAI endpoints (#1919)
* chore: bump browserslist-db@latest
* refactor(EndpointService): simplify with `generateConfig`, utilize optional baseURL for OpenAI-based endpoints, use `isUserProvided` helper fn wherever needed
* refactor(custom/initializeClient): use standardized naming for common variables
* feat: user provided baseURL for openAI-based endpoints
* refactor(custom/initializeClient): re-order operations
* fix: knownendpoints enum definition and add FetchTokenConfig, bump data-provider
* refactor(custom): use tokenKey dependent on userProvided conditions for caching and fetching endpointTokenConfig, anticipate token rates from custom config
* refactor(custom): assure endpointTokenConfig is only accessed from cache if qualifies for fetching
* fix(ci): update tests for initializeClient based on userProvideURL changes
* fix(EndpointService): correct baseURL env var for assistants: `ASSISTANTS_BASE_URL`
* fix: unnecessary run cancellation on res.close() when response.run is completed
* feat(assistants): user provided URL option
* ci: update tests and add test for `assistants` endpoint
* chore: leaner condition for request closing
* chore: more descriptive error message to provide keys again
* 🌍 feat: Extend regex to support international usernames (#1918)
* 🌍 Extend regex to support international usernames
* update validators.spec.js
* 🛠️ fix: RunManager, AssistantService and useContentHandler Issues (#1920)
* fix(useContentHandler): retain undefined parts and handle them within `ContentParts` rendering
* fix(AssistantService/in_progress): skip empty messages
* refactor(RunManager): create highly specific `seenSteps` Set keys for RunSteps with use of `getDetailsSignature` and `getToolCallSignature`, to ensure changes from polling are always captured
* 🐳 experimental: Dev Image Workflow & Remove Unused Code (#1928)
* chore: remove unused code in progressCallback, as well as handle reply.trim(), post `getCompletion`
* chore(Dockerfile): remove curl installation
* experimental: dev image parallelized with matrix strategy and building for amd64/arm64 support
* make platforms explicit
* 🐳 chore(Dockerfile): replace `npm ci` with `npm install` for OS specific builds
* 🐳chore(Dockerfile): add additional steps to prevent arm64 build failure
* 🔎docs: update meilisearch instruction (#1930)
* 🔎docs: update meilisearch in mac_install.md
Update the Meilisearch .env variables in `mac_install.md`
* 🔎🐧
* 📝 feat: Improved Textarea Functionality (#1942)
* feat: paste plain text from apps with rich paste data, improved edit message textarea, improved height resizing for long text
* feat(EditMessage): autofocus
* chore: retain user text color when entering edit mode
* 👤 feat: Show Default Icon if No Avatar or Username provided (#1943)
* 🖥️ feat: Match STDOUT Logs with Debug File Logs (#1944)
* chore: improve token balance logging post-request
* feat: match stdout logging with file debug logging when using DEBUG_CONSOLE
* 📚 docs: Separate LiteLLM and Ollama Documentation (#1948)
* Separate LiteLLM and Ollama Documentation
* Clarify Ollama Setup
* Fix litellm config
* 🔧 fix(useTextarea): duplicate text when pasting on chromium (#1951)
* 🔧 fix(EditMessage): duplicate text when pasting (#1970)
* fix(EditMessage): duplicate text when pasting on chromium
* add back paste data handling, prevent default behavior
---------
Co-authored-by: Danny Avila
* 🪙 fix(config): use new field for balance
* 🧩 fix(Plugins): Keep User `agentModel` and Model Validation (#1972)
* fix: do not override model
* temp fix for secondary model validation
* 🦙 doc: add Ollama to index and update icon (#1967)
* 🌍 : Add Hebrew Translation (#1953)
* feat: add hebrew
* fix: review issues
* fix language options
* 🥷🪦 docs: remove ninja and chatgptBrowser (#1973)
* 🧪 fix(ci): update failing `initializeClient` tests with new expected values (#1982)
* fix(ci): update failing tests with new expected values from `getUserKey`
* refactor: safer optional chaining, and ensure apiKey is defined
* 🔎 update meilisearch to v1.6 / 0.37.0 (#1981)
* 🔎 update meilisearch to v1.6 / 0.37.0
* 🔎 update meilisearch to v1.6 / 0.37.0
* 🛠️ refactor(loadConfigModels): make apiKey and baseURL pairings more versatile (#1985)
* 🤖 feat(Anthropic): Claude 3 & Vision Support (#1984)
* chore: bump anthropic SDK
* chore: update anthropic config settings (fileSupport, default models)
* feat: anthropic multi modal formatting
* refactor: update vision models and use endpoint specific max long side resizing
* feat(anthropic): multimodal messages, retry logic, and messages payload
* chore: add more safety to trimming content due to whitespace error for assistant messages
* feat(anthropic): token accounting and resending multiple images in progress
* chore: bump data-provider
* feat(anthropic): resendImages feature
* chore: optimize Edit/Ask controllers, switch model back to req model
* fix: false positive of invalid model
* refactor(validateVisionModel): use object as arg, pass in additional/available models
* refactor(validateModel): use helper function, `getModelsConfig`
* feat: add modelsConfig to endpointOption so it gets passed to all clients, use for properly validating vision models
* refactor: initialize default vision model and make sure it's available before assigning it
* refactor(useSSE): avoid resetting model if user selected a new model between request and response
* feat: show rate in transaction logging
* fix: return tokenCountMap regardless of payload shape
* 🖋️ fix(OpenAIClient): remove typo
* 🔎 feat: Traversaal Search Tool (#1991)
* wip: Traversaal Search Tool
* fix(traversaal): properly handle tool error, show error to LLM, log
* feat(traversaal): finish implementation of structured tool
* chore: change traversaal order
* 🔎 fix(Traversaal): Recognize `authField` during Tool Initialization
* 🖌️ style: Update Light/Dark UI Themes (#1754)
* BIG UI UPDATE
* fix: search bar, dialog template, new chat icon, convo icon and delete/rename button
* moved some color config and a lot of files
* small text fixes and tailwind config refactor
* Update localization and UI styles
* Update styles and add user-select:none to Tooltip component
* Update mobile.css styles for navigation mask and background color
* Update component imports and styles
* Update DeleteButton imports and references
* Update UI components
* Update tooltip delay duration
* Fix styling and update text in various components
* fixed assistant style
* minor style fixes
* revert: removed CreationHeader & CreationPanel
* style: match new styling for SidePanel
* style: match bg-gray-800 to ChatGPT (#212121)
* style: remove slate for gray where applicable to match new light theme
---------
Co-authored-by: Danny Avila
* 📝 docs update: Anthropic models + Traversaal (#1995)
* 📝 docs update: Anthropic models + Traversaal
* 📝 docs update: Anthropic models
* 🖌️ style: update dialog position (#1999)
* style(ChatForm): update styling and fixed style bug
* style:(Dialog): reduced max height style(Settings): fixed dialog position height
* style(Settings): fixed large screen position
* 🔧 style: Improve UI and UX with Style Fixes and Code Refactors (#2002)
* refactor(useSSE): add useCallback to all event handlers
* chore: remove modelName in defaultAssistantFormValues
* fix(SidePanel): fix layout shift on chrome by removing sidenav scrollbar
* style(ChatForm): match ChatGPT textarea effect styling
* style: fix flickering of old background color on refresh
* 📝 docs: additions to deployment guide (#2001)
* docs: add intro to deployment guide
* doc: update intro
* doc: Add NGINX deployment guide and update reverse proxy link
* doc: add reverse proxy pages and weight for the pages
* doc: Update NGINX configuration file
* doc: improve new doc
* Doc: fix file names
* doc: fix references names + improve the introduction with chatgpt :-)
* doc: update introduction guide headings
* 💾 chore: Update .env.example (#2004)
* Update .env.example
Make assistants show in the UI by default
* Update dotenv.md
* 🌊 docs: refactor DigitalOcean guide (#2006)
* 🔍 feat: Add Google Search Tool for Assistants (#1994)
* 👥 fix(assistants): Improve Error handling (#2012)
* feat: make assistants endpoint appendable since message state is not managed by LibreChat
* fix(ask): search currentMessages for thread_id if it's not defined
* refactor(abortMiddleware): remove use of `overrideProps` and spread unknown fields instead
* chore: remove console.log in `abortConversation`
* refactor(assistants): improve error handling/cancellation flow
* 🖼️ fix: Clipboard Files & File Name Issues (#2015)
* fix: ensure image handling fetches image to base64 for multiple images
* fix: append file_id's when writing uploaded files
* feat: timestamp files uploaded from clipboard
* chore: add a different fileid+name separator
* 🖊️chore: fix deployment guides (#2021)
* 🔖 chore: update groq models (#2031)
* 🔧 fix(ThemeContext): Listen for Theme Changes (#2037)
* fix(ThemeContext): listen for changes
* fix(Dropdown): theme auto-update not working
* ✏️ docs: Railway, Traefik, and Improvements (#2060)
* docs: documentation guidelines
* docs: deploy documentation update
* 📧 update email templates (#2057)
* 📧 chore: update email templates
* 📧 update password reset confirmation
* ✍️ refactor(Textarea): Optimize Text Input & Enhance UX (#2058)
* refactor(useDebouncedInput): make object as input arg and accept setter
* refactor(ChatForm/Textarea): consolidate textarea/form logic to one component, use react-hook-form, programmatically click send button instead of passing submitMessage, forwardRef and memoize SendButton
* refactor(Textarea): use Controller field value to avoid manual update of ref
* chore: remove forms provider
* chore: memoize AttachFile
* refactor(ChatForm/SendButton): only re-render SendButton when there is text input
* chore: make iconURL bigger
* chore: optimize Root/Nav
* refactor(SendButton): memoize disabled prop based on text
* chore: memoize Nav and ChatForm
* chore: remove textarea ref text on submission
* feat(EditMessage): Make Esc exit the edit mode and dismiss changes when editing a message
* style(MenuItem): Display the ☑️ icon only on the selected model
* 🔧 style(SidePanel): Center buttons when collapsed (#2045)
* feat: Improve Google search plugin to assistants
* 🔧 fix(Nav SidePanel): Center buttons when collapsed
* 🖌️ style: Minor UI Updates (#2011)
* UI Design update
* Add an error icon next to the avatar.
* fix
* Change the style of buttons
* fix: avatar
* 🌐 feat: librechat.yaml from URL (#2064)
* feat: librechat.yaml from URL
* doc update: librechat.yaml from URL
* update dotenv.md - typo
* Update loadCustomConfig.js
* ci: specs for loadCustomConfig
* fix(processFileURL): safe destructuring of saveURL result
---------
Co-authored-by: fuegovic
Co-authored-by: Fuegovic <32828263+fuegovic@users.noreply.github.com>
* 🌑 style(AnthropicIcon): adjust for Dark Mode
* 🧊 style: Adjust Endpoint Icons (#2070)
* 🧊 style: Adjust Endpoint Icons
* Update MessageParts.tsx
* 🧹 chore: remove unused import (#2072)
* chore: Update AnthropicIcon.tsx
* 🔧 fix: Convo Corners & Updated Colors (#2046)
* 🔧 fix: Convo Corners & Updated Colors
* refactored code
* chore: JSON.parse with a try/catch block, removed useless useEffect & and restored Focus
* restored typescript
* import all back
* ⛔ style: Auth Error and Preset Items Styling (#2069)
* Change the style of the error message.
* ui preset items
* fix style
* Change the color of the border and adjust the background of the selected input
* 🌍 : Update Russian Translation (#2061)
* feat(chore): add missing translations in Ru.tsx
* feat(chore): add missing translation for My Files menu and headers
* change com_ui_my_files to com_ui_nav_files
* move useLocalize above utils
* feat(chore): add missing translation for My Files menu and headers
* 📜 feat: Customize Privacy Policy & Terms of Service (#2091)
* 🅰️ feat: Azure OpenAI Assistants API Support (#1992)
* chore: rename dir from `assistant` to plural
* feat: `assistants` field for azure config, spread options in AppService
* refactor: rename constructAzureURL param for azure as `azureOptions`
* chore: bump openai and bun
* chore(loadDefaultModels): change naming of assistant -> assistants
* feat: load azure settings with correct baseURL for assistants' initializeClient
* refactor: add `assistants` flags to groups and model configs, add mapGroupToAzureConfig
* feat(loadConfigEndpoints): initialize assistants endpoint if azure flag `assistants` is enabled
* feat(AppService): determine assistant models on startup, throw Error if none
* refactor(useDeleteAssistantMutation): send model along with assistant id for delete mutations
* feat: support listing and deleting assistants with azure
* feat: add model query to assistant avatar upload
* feat: add azure support for retrieveRun method
* refactor: update OpenAIClient initialization
* chore: update README
* fix(ci): tests passing
* refactor(uploadOpenAIFile): improve logging and use more efficient REST API method
* refactor(useFileHandling): add model to metadata to target Azure region compatible with current model
* chore(files): add azure naming pattern for valid file id recognition
* fix(assistants): initialize openai with first available assistant model if none provided
* refactor(uploadOpenAIFile): add content type for azure, initialize formdata before azure options
* refactor(sleep): move sleep function out of Runs and into `~/server/utils`
* fix(azureOpenAI/assistants): make sure to only overwrite models with assistant models if `assistants` flag is enabled
* refactor(uploadOpenAIFile): revert to old method
* chore(uploadOpenAIFile): use enum for file purpose
* docs: azureOpenAI update guide with more info, examples
* feat: enable/disable assistant capabilities and specify retrieval models
* refactor: optional chain conditional statement in loadConfigModels.js
* docs: add assistants examples
* chore: update librechat.example.yaml
* docs(azure): update note of file upload behavior in Azure OpenAI Assistants
* chore: update docs and add descriptive message about assistant errors
* fix: prevent message submission with invalid assistant or if files loading
* style: update Landing icon & text when assistant is not selected
* chore: bump librechat-data-provider to 0.4.8
* fix(assistants/azure): assign req.body.model for proper azure init to abort runs
* 🤖 fix(assistants): Default Capabilities and Retrieval Models (#2102)
* 📑 docs: update .env.example (#2109)
* 🌍 : Update Chinese Translations (#2098)
* 🪙 feat: Assistants Token Balance & other improvements (#2114)
* chore: add assistants to supportsBalanceCheck
* feat(Transaction): getTransactions and refactor export of model
* refactor: use enum: ViolationTypes.TOKEN_BALANCE
* feat(assistants): check balance
* refactor(assistants): only add promptBuffer if new convo (for title), and remove endpoint definition
* refactor(assistants): Count tokens up to the current context window
* fix(Switcher): make Select list explicitly controlled
* feat(assistants): use assistant's default model when no model is specified instead of the last selected assistant, prevent assistant_id from being recorded in non-assistant endpoints
* chore(assistants/chat): import order
* chore: bump librechat-data-provider due to changes
* 🎨 style: Privacy Policy & Terms of Service (#2111)
* 🖊️ fix(MessageContent): Error Message typo (#2112)
* 🔧 feat: Share Assistant Actions between Users (#2116)
* fix: remove unique field from assistant_id, which can be shared between different users
* refactor: remove unique user fields from actions/assistant queries
* feat: only allow user who saved action to delete it
* refactor: allow deletions for anyone with builder access
* refactor: update user.id when updating assistants/actions records, instead of searching with it
* fix: stringify response data in case it's an object
* fix: correctly handle path input
* fix(decryptV2): handle edge case where value is already decrypted
* 🔧 fix: Remove `Unique` Index from Actions Model and Initialize Empty Actions for Deletion (#2118)
* 🔧 feat(actions): Allow Multiple Actions from Same Domain per Assistant (#2120)
* 🪰 fix: Azure Parsing and Assistants Payload (#2133)
* fix(azure): fix regex to prevent edge cases
* fix(assistants): pass relevant endpoint options to avoid sending them to API
* 🔍 feat: Filter MultiSelect and SelectDropDown (+variants) + CSS fixes for Scrollbar (#2138)
* Initial implementation of MultiSearch. Added implementation to MultiSelect and SelectDropDown and variants
* Update scrollbar styles to prevent breakages on Chrome
* Revert changes to vite.config.ts (redundant for now)
* chore(New Chat): organize imports
* style(scrollbar-transparent): use webkit as standard, expected behavior
* chore: useCallback for mouse enter/leave
* fix(Footer): resolve map key error
* chore: memoize Conversations
* style(MultiSearch): improve multisearch styling
* style: dark mode search input
* fix: react warnings due to unrecognized html props
* chore: debounce OpenAI settings inputs
* fix(useDebouncedInput): only use event value as newValue if not object
---------
Co-authored-by: Flynn
* 📋 chore: add requirements.txt to documentation (#2122)
* chore: add requirements.txt to documentation, to ease maintenance
* docs: Update documentation_guidelines.md
* 🔧 style(fix): Convo Title Fade Effect (#2117)
* feat: Improve Google search plugin to assistants
* 🔧 fix(Nav SidePanel): Center buttons when collapsed
* 🔧(fix) Convo title fade effect
* 🔧(fix) Convo title fade effect / remove deletion
* 🔧(fix) Convo title fade effect / remove deletion .env.example
* 🔧(fix) Convo title fade effect
---------
Co-authored-by: Danny Avila
* Revert "🔧 style(fix): Convo Title Fade Effect (#2117)" (#2139)
This reverts commit 1796821888ff09fbe0e9912408cb1387822ee866.
* 🎨 style: HoverButton UI adjustment, change code font (#2017)
* style: HoverButton UI adjustment
* style: make Consolas as default code font
---------
Co-authored-by: Danny Avila
* 🗃️ feat: General File Support for OpenAI, Azure, Custom, Anthropic and Google (RAG) (#2143)
* refactor: re-purpose `resendImages` as `resendFiles`
* refactor: re-purpose `resendImages` as `resendFiles`
* feat: upload general files
* feat: embed file during upload
* feat: delete file embeddings on file deletion
* chore(fileConfig): add epub+zip type
* feat(encodeAndFormat): handle non-image files
* feat(createContextHandlers): build context prompt from file attachments and successful RAG
* fix: prevent non-temp files as well as embedded files to be deleted on new conversation
* fix: remove temp_file_id on usage, prevent non-temp files as well as embedded files to be deleted on new conversation
* fix: prevent non-temp files as well as embedded files to be deleted on new conversation
* feat(OpenAI/Anthropic/Google): basic RAG support
* fix: delete `resendFiles` only when true (Default)
* refactor(RAG): update endpoints and pass JWT
* fix(resendFiles): default values
* fix(context/processFile): query unique ids only
* feat: rag-api.yaml
* feat: file upload improved ux for longer uploads
* chore: await embed call and catch embedding errors
* refactor: store augmentedPrompt in Client
* refactor(processFileUpload): throw error if not assistant file upload
* fix(useFileHandling): handle markdown empty mimetype issue
* chore: necessary compose file changes
* 📧 fix: Correct Handling of Self-Signed Certificates in `sendEmail` (#2148)
- note: To put it in a different way, if you put rejectUnauthorized: true, it means that self-signed certificates should not be allowed. This means, that EMAIL_ALLOW_SELFSIGNED is set to false
* ➰ fix(sendEmail): circular dependency
* 🔬 chore: Add Circular Dependency Check to `backend-review` (#2149)
* 🔬 chore: Add Circular Dependency check to `backend-review`
* chore: touch random file for workflow trigger
* chore: workflow step order
* chore: update workflow to create empty auth.json file
* fix: attempt empty auth.json creation
* chore: add test_bundle ESLint ignore pattern
* 🔧 style(fix): Convo Fade Effect (#2147)
* 🔧 (fix) Convo Fade Effect
* 🔧style(fix): Convo Fade Effect (#2117)
* 🔧 style(fix): Convo Fade Effect (#2117)
* 🖌️ style: Improve Dark Theme Accessibility (#2125)
* style: all landing page components
* chore: converted all slate to gray, since slate doesn't work
* style: assistant panel
* style: basic UI components, userprovided, preset
* style: update in multiple components
* fix(PluginStoreDialog): justify-center
* fixed some minor Ui styles
* style(MultiSearch): update dark bg
* style: update Convo styling
* style: lower textarea max height slightly
---------
Co-authored-by: Danny Avila
* 🔧 fix(menu): Menu Item Filter Improvements (#2153)
* small-fix: Ensure that fake separators in model lists do not show in search
* Ensure Plugin search uses correct placeholder and key filtering in search
* 🚀 feat: Add ShuttleAI as Known Endpoint (#2152)
Added new Official Known Endpoint (ShuttleAI)
* 🌑 style(File Manager): Localize and Update Dark Mode Stylings (#2155)
* 🌑 style: Update Dark Mode Stylings for File Manager
* 🌐 feat: localize file manager text
* 🌐 feat: file panel table localization
* 🐋 chore: Cleanup Dockerfile (#2156)
* 🐋 fix(Dockerfile): add back additional deps., handle permissions, use `--no-audit` flag on install (#2157)
* 🐋 chore: Revise of PR #2157, move step earlier
* chore: Revise of PR #2157, move global steps earlier, execute as root
* 📖 docs: Update ShuttleAI Fibonacci Image (#2160)
* 🚀 feat: Assistants Streaming (#2159)
* chore: bump openai to 4.29.0 and npm audit fix
* chore: remove unnecessary stream field from ContentData
* feat: new enum and types for AssistantStreamEvent
* refactor(AssistantService): remove stream field and add conversationId to text ContentData
> - return `finalMessage` and `text` on run completion
> - move `processMessages` to services/Threads to avoid circular dependencies with new stream handling
> - refactor(processMessages/retrieveAndProcessFile): add new `client` field to differentiate new RunClient type
* WIP: new assistants stream handling
* chore: stores messages to StreamRunManager
* chore: add additional typedefs
* fix: pass req and openai to StreamRunManager
* fix(AssistantService): pass openai as client to `retrieveAndProcessFile`
* WIP: streaming tool i/o, handle in_progress and completed run steps
* feat(assistants): process required actions with streaming enabled
* chore: condense early return check for useSSE useEffect
* chore: remove unnecessary comments and only handle completed tool calls when not function
* feat: add TTL for assistants run abort cacheKey
* feat: abort stream runs
* fix(assistants): render streaming cursor
* fix(assistants): hide edit icon as functionality is not supported
* fix(textArea): handle pasting edge cases; first, when onChange events wouldn't fire; second, when textarea wouldn't resize
* chore: memoize Conversations
* chore(useTextarea): reverse args order
* fix: load default capabilities when an azure is configured to support assistants, but `assistants` endpoint is not configured
* fix(AssistantSelect): update form assistant model on assistant form select
* fix(actions): handle azure strict validation for function names to fix crud for actions
* chore: remove content data debug log as it fires in rapid succession
* feat: improve UX for assistant errors mid-request
* feat: add tool call localizations and replace any domain separators from azure action names
* refactor(chat): error out tool calls without outputs during handleError
* fix(ToolService): handle domain separators allowing Azure use of actions
* refactor(StreamRunManager): types and throw Error if tool submission fails
* 📂 feat: RAG Improvements (#2169)
* feat: new vector file processing strategy
* chore: remove unused client files
* chore: remove more unused client files
* chore: remove more unused client files and move used to new dir
* chore(DataIcon): add className
* WIP: Model Endpoint Settings Update, draft additional context settings
* feat: improve parsing for augmented prompt, add full context option
* chore: remove volume mounting from rag.yml as no longer necessary
* 📚 docs: Fix Broken Links (#2171)
Fix broken links to the custom config file on `timeoutMs` and `supportedIds`.
* 🎉 feat: Optimizations and Anthropic Title Generation (#2184)
* feat: add claude-3-haiku-20240307 to default anthropic list
* refactor: optimize `saveMessage` calls mid-stream via throttling
* chore: remove addMetadata operations and consolidate in BaseClient
* fix(listAssistantsForAzure): attempt to specify correct model mapping as accurately as possible (#2177)
* refactor(client): update last conversation setup with current assistant model, call newConvo again when assistants load to allow fast initial load and ensure assistant model is always the default, not the last selected model
* refactor(cache): explicitly add TTL of 2 minutes when setting titleCache and add default TTL of 10 minutes to abortKeys cache
* feat(AnthropicClient): conversation titling using Anthropic Function Calling
* chore: remove extraneous token usage logging
* fix(convos): unhandled edge case for conversation grouping (undefined conversation)
* style: Improved style of Search Bar after recent UI update
* chore: remove unused code, content part helpers
* feat: always show code option
* 👓 feat: Vision Support for Assistants (#2195)
* refactor(assistants/chat): use promises to speed up initialization, initialize shared variables, include `attachedFileIds` to streamRunManager
* chore: additional typedefs
* fix(OpenAIClient): handle edge case where attachments promise is resolved
* feat: createVisionPrompt
* feat: Vision Support for Assistants
* 🔧 fix(assistants): Vision minor fix & Add Docs (#2196)
* 👓 fix(assistants): Only Retrieve Assistant Data for Vision Requests if attachments exist in Host Storage
* docs: add capability
* 🧑💻docs: Update General Docs and Contribution Guidelines (#2194)
* doc update: documentation_guidelines.md
* doc update: how_to_contribute.md
* doc update: testing.md / how_to_contribute.md
* doc update: translation_contribution.md/testing.md/how_to_contribute.md
* doc update: coding_conventions.md
* fix formatting: how_to_contribute.md
* fix formatting (again) : how_to_contribute.md
* 🌟 fix: Handle Assistants Edge Cases, Improve Filter Styling (#2201)
* fix(assistants): default query to limit of 100 and `desc` order
* refactor(useMultiSearch): use object as params and fix styling for assistants
* feat: informative message for thread initialization failing due to long message
* 🐞 fix(client): Prevent Async Reset of Latest Message (#2203)
* refactor: use debug statement runStepCompleted message
* fix(ChatRoute): prevent use of `newConversation` from resetting `latestMessage`, which would fire asynchronously and finalize after `latestMessage` was already correctly set
* 📖 docs: Add details for Azure OpenAI Assistants (#2173)
The default `.env` contains the line `ASSISTANTS_API_KEY=user_provided`. When pre-configuring Azure OpenAI models, this setting makes it impossible to use assistants due to a missing user provided key. Only by commenting the line out the Azure setup works.
* 🎨 style: Refine SidePanel and Textarea Styling (#2209)
* experimental: use TextareaAutosize wrapper with useLayoutEffect to hopefully fix random textarea jankiness
* fix(Textarea): force a resize when placeholder text changes
* style(ScrollToBottom): update styling for scroll button
* style: memoize values and improve side panel toggle states
* refactor(SidePanel): more control for toggle states, new hide panel button, and improve toggle state logic
* chore: hide resizable panel handle on smaller screens
* 🎨 style: Ensure Side Panel state Remains on Refresh (#2210)
* 🧹 chore: Update Docker Docs & Make `cache` field Optional for Custom Config (#2211)
* docs: updating docker
* fix(customConfig): make `cache` field optional as intended (though not recommended for local setups)
* 🚀 feat: Add GitHub Actions Workflow for Generating Docs Embeddings (#2216)
* 🔍 chore: Clean Up Documentation (#2217)
* fix(initializeClient.spec.js): remove condition failing test on local installations
* docs: remove comments and invalid html as is required by embeddings generator and add new documentation guidelines
* 🔍 chore: Clean Up Documentation Part 2 (#2218)
* 🔍 chore: Clean Up Documentation Part 3 (#2219)
* 🔍 chore: Clean Up Documentation Pt. 4 (#2220)
* 🔗 docs: Fix Link to Code of Conduct (#2206)
Fix link to Code of Conduct
* 💽 feat: Add CONSOLE_JSON for deploying to GCP K8S env (#2146)
* Add CONSOLE_JSON
* Update example env
* Moved to utils
* 🔎 chore: bump meilisearch v1.7 / v0.38.0 (#2175)
* 🔎 chore: bump meilisearch v1.7 / v0.38.0
* ✏️ breaking_changes.md
* 💽 feat: Add Script for User Stats (#2224)
* ✏️ doc update: dotenv.md (#2226)
* ⬇️ feat: Assistant File Downloads (#2234)
* WIP: basic route for file downloads and file strategy for generating readablestream to pipe as res
* chore(DALLE3): add typing for OpenAI client
* chore: add `CONSOLE_JSON` notes to dotenv.md
* WIP: first pass OpenAI Assistants File Output handling
* feat: first pass assistants output file download from openai
* chore: yml vs. yaml variation to .gitignore for `librechat.yml`
* refactor(retrieveAndProcessFile): remove redundancies
* fix(syncMessages): explicit sort of apiMessages to fix message order on abort
* chore: add logs for warnings and errors, show toast on frontend
* chore: add logger where console was still being used
* ⚓ refactor(loadConfigModels): Fallback to Default Models if Fetch Fails (#2236)
* 🌍 : Updated & Added new Portuguese and Spanish Translations (#2228)
* 🌍 : Updated & Added new Portuguese and Spanish Translations
* fix: \' to "
* fix(Br.tsx): revert Snyk placeholders
* fix(Es.tsx): revert Snyk placeholders
---------
Co-authored-by: Danny Avila
* 🗨️ fix(useSSE): Prevent 'New Chat' Title after Regenerating Initial Message (#2238)
* ⚓ refactor(loadConfigModels): Stricter Default Model Fallback (#2239)
* chore: add TEndpoint type/typedef
* refactor(loadConfigModels.spec): stricter default model matching (fails with current impl.)
* refactor(loadConfigModels): return default models on endpoint basis and not fetch basis
* refactor: rename `uniqueKeyToNameMap` to `uniqueKeyToEndpointsMap` for clarity
* 🌍 fix(Translations): Map Partial `langCode` and Add Unit Tests (#2240)
* 🔧 fix: Improve Assistants File Citation & Download Handling (#2248)
* fix(processMessages): properly handle assistant file citations and add sources list
* feat: improve file download UX by making any downloaded files accessible within the app post-download
* refactor(processOpenAIImageOutput): correctly handle two different outputs for images since OpenAI generates a file in their storage, shares filepath for image rendering
* refactor: create `addFileToCache` helper to use across frontend
* refactor: add ImageFile parts to cache on processing content stream
* 🐳 feat: RAG for Default Docker Compose Files + Docs Update (#2246)
* refactor(deploy-compose.yml): use long-syntax to avoid implicit folder creation of librechat.yaml
* refactor(docker-compose.override.yml.example): use long-syntax to avoid implicit folder creation of librechat.yaml
* chore: add simple health check for RAG_API_URL
* chore: improve axios error handling, adding `logAxiosError`
* chore: more informative message detailing RAG_API_URL path
* feat: add rag_api and vectordb to default compose file
* chore(rag.yml): update standalone rag compose file to use RAG_PORT
* chore: documentation updates
* docs: Update rag_api.md with images
* Update rag_api.md
* Update rag_api.md, assistants clarification
* add RAG API note to breaking changes
* 🎨 fix: Optimize StableDiffusion API Tool and Fix for Assistants Usage (#2253)
* chore: update docs
* fix(StableDiffusion): optimize API responses and file handling, return expected metadata for Assistants endpoint
* 🚀 feat: Add CLI Helper Scripts to API Container Image (#2257)
* 🐞 fix: Handle Garbled Chinese Characters in File Upload (#2261)
Co-authored-by: 彭修照
* 🐞 fix: Handle Empty Model Error in Assistants Form (#2265)
* 📋 fix: Ensure Textarea Resizes in Clipboard Edge Case (#2268)
* chore: ts-ignore fake conversation data used for testing
* chore(useTextarea): import helper functions to declutter hook
* fix(Textarea): reset textarea value explicitly by resetting `textAreaRef.current.value`
* ✨ v0.7.0 (#2266)
* ✨ v0.7.0
* chore: gitignore
* 🐳 ci: update release image workflows
* 🐞Fix: Stable Diffusion User Directory (#2270)
* 🐳 hotfix: Necessary Dockerfile Update (#2271)
* chore: remove version comment from pre-commit shell script
* chore: Dockerfile update
* 🐳 hotfix: Tag Images Workflow Update (#2272)
* ✨ v0.7.0 (#2273)
* 🛂 feat: Required OpenID Role (#2279)
* feat: add possibility to filter by roles for OpenID provider
---------
Co-authored-by: Sirius
* 📗 docs: Update NagaAI (#2278)
* 🐋 fix(Dockerfile): Create Necessary Directories at Build time (#2277)
When creating volumes for /app/client/public/images and /app/api/logs
docker will inherit the permissions from the existing directories in the
image. Since they are missing it defaults to root, and since
librechat now uses the "node" user instead of "root" storing images,
files and logs will fail.
Fix by creating those directories in the docker image with the node
user, so that if docker creates the volumes the permissions are inherited
and the directories are owned by "node" and not "root".
* 📖 docs: Note on 'host.docker.internal' for Ollama Config (#2274)
* docs: update URL to access ollama and comment on 'host.docker.internal'
* Update ai_endpoints.md
---------
Co-authored-by: Danny Avila
* 📝 docs: Remove Google Domains Reference (#2267)
* 🔨 fix(ToolService): remove userId filter from `loadActionSets` & Docs Update (#2286)
* fix(ToolService): remove userId filter from `loadActionSets`
* docs: updates to rag_api and docker_override explaining key variable conflicts
* 🔗 docs: Fix Link to Docker Compose Override File (#2287)
* 🌍: Add new Japanese Localization entries (#2282)
* 🛠️ fix: Correct Unwanted Newlines after Undo in Textarea (#2289)
* docs: edit docker_override note for deploy-compose
* 🛠️ fix: Correct Unwanted Newlines after Undo in Textarea
* 🔄 chore: converted translation files to `.ts` (#2288)
* chore: converted translation files to
* chore(Sv.ts): removed and the comment
* chore: add comment
* 🔧 fix: Catch `deleteVectors` Errors & Update RAG API docs (#2299)
* fix(deleteVectors): handle errors gracefully
* chore: update docs based on new alternate env vars prefixed with RAG to avoid conflicts with LibreChat keys
* 🚥 docs: fixed Traefik web layout (#2305)
Fixed Traefik config for broken web rendering
* 🦙 docs: Update Ollama + LiteLLM Instructions (#2302)
* Update litellm.md
* set OPENAI_API_KEY of litellm service (needs to be set if ollama's openai api compatibility is used)
* 🐋 refactor(Dockerfile.multi): Optimize client build by caching npm install step (#2275)
* 🐋 fix(Dockerfile): Optimize client build by caching npm install step
* 🐋 fix(Dockerfile): Possible interference from librechat-data-provider in client build
* 📝 docs: Correct Google OAuth Callback URL Example (#2311)
* 🧠 feat: Cohere support as Custom Endpoint (#2328)
* chore: bump cohere-ai, fix firebase vulnerabilities by going down versions
* feat: cohere rates and context windows
* feat(createCoherePayload): transform openai payload for cohere compatibility
* feat: cohere backend support
* refactor(UnknownIcon): optimize icon render and add cohere
* docs: add cohere to Compatible AI Endpoints
* Update ai_endpoints.md
* 🧠 fix(Cohere): map to expected SDK params (#2329)
* 💽 refactor(client): Optimize ModelsConfig Query Cache (#2330)
* refactor(client): remove double caching of models via recoil to rely exclusively on react-query
* chore(useConversation): add modelsQuery.data dep to callback
* 🚦 docs: Update `traefik.md` - Documentation Fix for edge case race condition (#2322)
Sometimes Traefik created a race condition where LibreChat was up on tcp/3080, and while Traefik was up on tcp/443, it could not route to the LibreChat container due to the multiple interfaces -- depending on how they came up. This is easily solved by simply using one interface.
* 🚅 docs: Working Examples for LiteLLM, Docker, LibreChat and LiteLLM models for AWS, Azure, GCP (#2323)
Updated documentation with working config examples and clarifying many details.
Added working examples for:
* LiteLLM (litellm/litellm-config.yaml)
* Docker (docker-compose.override.yml)
* LibreChat (librechat.yaml)
Added LiteLLM "ready to use" model for:
* AWS Bedrock
* Azure OpenAI
* OpenAI
* GCP
* 🚅 docs(ai_endpoints): Reflect correct LiteLLM baseURL when using docker-compose (#2324)
Added note to LiteLLM baseURL to reflect docker-compose usage
* 🚧 refactor: Attempt Default Preset Fix & Other Changes (#2342)
* fix(useTextarea): trigger SendButton re-render on undo and clearing text
* refactor(PresetItems): show pin icon for default preset
* fix(ChatRoute): do not use conversation.model for useEffect, do not set default Preset if real model list is not yet fetched
* 📘 docs: Add Note to `nginx.md` (#2341)
added reference for the need to do 'sudo apt update'
* ⚠️ docs: Default Value Warnings & Docker Docs Update (#2343)
* feat(AppService): default secret value warnings
* docs: update docker/ubuntu related guides
* 🚀 feat: Enhance Message Editing with File Resubmission (#2347)
* chore: fix type issue with File Table fakeData
* refactor: new lazy loading image strategy and load images/files as part of Message Container
* feat: resubmit files when editing messages with attached files
* 🐞 fix: Balance and Token Usage Improvements (#2350)
* fix(processModelData): handle `openrouter/auto` edge case
* fix(Tx.create): prevent negative multiplier edge case and prevent balance from becoming negative
* fix(NavLinks): render 0 balance properly
* refactor(NavLinks): show only up to 2 decimal places for balance
* fix(OpenAIClient/titleConvo): fix cohere condition and record token usage for `this.options.titleMethod === 'completion'`
* 🎨 feat: Title Improvements (#2363)
* fix(assistants): keep generated title upon continued messages in active conversation
* feat: update document.title on successful gentitle mutation
* ⬇️ refactor: Assistant File Downloads (#2364)
* refactor(getFiledownload): explicit accept of `application/octet-stream`
* chore: test compose file
* chore: test compose file fix
* chore(files/download): add more logs
* Fix proxy_pass URLs in nginx.conf
* fix: proxy_pass URLs in nginx.conf to fix file downloads from URL
* chore: move test compose file to utils dir
* refactor(useFileDownload): simplify API request by passing `file_id` instead of `filepath`
* 🌍: Update Chinese Translation (#2351)
Update Simplified Chinese Translation
* 👟 style: CodeAnalyze Animation (#2348)
* refactor(CodeAnaluzer SVG)
* refactor(CodeAnalyzer SVG)
* style: center terminal animation, reduce scaling
---------
Co-authored-by: Danny Avila
Co-authored-by: Danny Avila
* 🧑🎨 style: Remove Plugins Icon Background (#2368)
* 📦 feat: Model & Assistants Combobox for Side Panel (#2380)
* WIP: dynamic settings
* WIP: update tests and validations
* refactor(SidePanel): use hook for Links
* WIP: dynamic settings, slider implemented
* feat(useDebouncedInput): dynamic typing with generic
* refactor(generate): add `custom` optionType to be non-conforming to conversation schema
* feat: DynamicDropdown
* refactor(DynamicSlider): custom optionType handling and useEffect for conversation updates elsewhere
* refactor(Panel): add more test cases
* chore(DynamicSlider): note
* refactor(useDebouncedInput): import defaultDebouncedDelay from ~/common`
* WIP: implement remaining ComponentTypes
* chore: add com_sidepanel_parameters
* refactor: add langCode handling for dynamic settings
* chore(useOriginNavigate): change path to '/c/'
* refactor: explicit textarea focus on new convo, share textarea idea via ~/common
* refactor: useParameterEffects: reset if convo or preset Ids change, share and maintain statefulness in side panel
* wip: combobox
* chore: minor styling for Select components
* wip: combobox select styling for side panel
* feat: complete combobox
* refactor: model select for side panel switcher
* refactor(Combobox): add portal
* chore: comment out dynamic parameters panel for future PR and delete prompt files
* refactor(Combobox): add icon field for options, change hover bg-color, add displayValue
* fix(useNewConvo): proper textarea focus with setTimeout
* refactor(AssistantSwitcher): use Combobox
* refactor(ModelSwitcher): add textarea focus on model switch
* 🤖 feat: Update Context Limit for `gpt-3.5-turbo` (#2381)
* 👷 fix: Minor Fixes and Refactors (#2388)
* refactor(useTextarea): set Textarea disabled message due to key higher in priority
* fix(SidePanel): intended behavior for non-user provided keys
* fix: generate specs
* style: update combobox styling as before, with better dynamic height
* chore: remove unused import
* 🔒 feat: Authenticated Image Requests (#2389)
* 🔒 feat: Authenticated Image Requests
* fix: reserved keyword `static`
* pulling in latest from upstream
---------
Co-authored-by: Danny Avila
Co-authored-by: Linus Gasser
Co-authored-by: Kakenyan <38045575+Kakenyan@users.noreply.github.com>
Co-authored-by: Fuegovic <32828263+fuegovic@users.noreply.github.com>
Co-authored-by: zimmra
Co-authored-by: marlonka <153027575+marlonka@users.noreply.github.com>
Co-authored-by: marlonka
Co-authored-by: Raí Santos <140329135+itzraiss@users.noreply.github.com>
Co-authored-by: Walber Cardoso
Co-authored-by: Ryohei Kamiya
Co-authored-by: MACHINSOFT <110278369+machinsoft@users.noreply.github.com>
Co-authored-by: Yuanlin Lin
Co-authored-by: Marco Beretta <81851188+Berry-13@users.noreply.github.com>
Co-authored-by: Aleksei Lisikhin <475367+Nihisil@users.noreply.github.com>
Co-authored-by: bsu3338
Co-authored-by: Danny Avila
Co-authored-by: Trevor Swanson <83826109+trevorswanson@users.noreply.github.com>
Co-authored-by: CXwudi
Co-authored-by: Walber Cardoso <50842245+walbercardoso@users.noreply.github.com>
Co-authored-by: Arno Angerer
Co-authored-by: Andreas <21068727+ndrsfel@users.noreply.github.com>
Co-authored-by: afel
Co-authored-by: Ido Ophir
Co-authored-by: fuegovic
Co-authored-by: Vilmondes Queiroz
Co-authored-by: Alexei Smirnov
Co-authored-by: Flynn
Co-authored-by: SailFlorve
Co-authored-by: Hermes Trismegistus <91907013+herumes@users.noreply.github.com>
Co-authored-by: Florian Kohrt
Co-authored-by: suzuki.sh
Co-authored-by: Ivan Dachev
Co-authored-by: pxz2016 <303796828@qq.com>
Co-authored-by: 彭修照
Co-authored-by: Sirius
Co-authored-by: Zentix <65168490+ZentixUA@users.noreply.github.com>
Co-authored-by: Christoph Reiter
Co-authored-by: Till Zoppke
Co-authored-by: illgitthat
Co-authored-by: Andi
Co-authored-by: happy_ryo
Co-authored-by: Ventz Petkov <901168+ventz@users.noreply.github.com>
Co-authored-by: Marius <90092216+mariusgau@users.noreply.github.com>
Co-authored-by: ochen1 <59662605+ochen1@users.noreply.github.com>
Co-authored-by: Paul
Co-authored-by: David LaPorte
Co-authored-by: chrislbrown84 <129366063+chrislbrown84@users.noreply.github.com>
Co-authored-by: Melaton <77706174+HundSimon@users.noreply.github.com>
Co-authored-by: Danny Avila
Co-authored-by: matt burnett
---
.devcontainer/Dockerfile | 5 +
.devcontainer/devcontainer.json | 3 +-
.devcontainer/docker-compose.yml | 8 +-
.env.example | 163 +-
.eslintrc.js | 7 +
.github/CODE_OF_CONDUCT.md | 2 +-
.github/CONTRIBUTING.md | 2 +-
.github/ISSUE_TEMPLATE/BUG-REPORT.yml | 2 +-
.github/SECURITY.md | 6 +-
.github/pull_request_template.md | 6 +-
.github/workflows/backend-review.yml | 18 +
.github/workflows/container.yml | 83 -
.github/workflows/dev-images.yml | 63 +-
.github/workflows/generate_embeddings.yml | 20 +
.github/workflows/latest-images-main.yml | 40 -
.github/workflows/main-image-workflow.yml | 69 +
.github/workflows/tag-images.yml | 67 +
.gitignore | 10 +-
.husky/pre-commit | 2 +-
Dockerfile | 25 +-
Dockerfile.multi | 7 +-
README.md | 77 +-
api/app/chatgpt-browser.js | 3 +-
api/app/clients/AnthropicClient.js | 492 +-
api/app/clients/BaseClient.js | 74 +-
api/app/clients/ChatGPTClient.js | 203 +-
api/app/clients/GoogleClient.js | 82 +-
api/app/clients/OpenAIClient.js | 363 +-
api/app/clients/PluginsClient.js | 6 +-
api/app/clients/llm/createCoherePayload.js | 85 +
api/app/clients/llm/createLLM.js | 8 +-
api/app/clients/llm/index.js | 2 +
.../clients/prompts/createContextHandlers.js | 159 +
api/app/clients/prompts/createVisionPrompt.js | 34 +
api/app/clients/prompts/formatMessages.js | 18 +-
.../clients/prompts/formatMessages.spec.js | 5 +-
api/app/clients/prompts/index.js | 4 +
api/app/clients/prompts/titlePrompts.js | 56 +
api/app/clients/specs/BaseClient.test.js | 5 +-
api/app/clients/specs/PluginsClient.test.js | 41 +-
api/app/clients/tools/DALL-E.js | 66 +-
api/app/clients/tools/GoogleSearch.js | 121 -
api/app/clients/tools/index.js | 43 +-
api/app/clients/tools/manifest.json | 30 +-
.../clients/tools/structured/AzureAISearch.js | 18 +-
api/app/clients/tools/structured/DALLE3.js | 55 +-
.../clients/tools/structured/GoogleSearch.js | 65 +
.../tools/structured/StableDiffusion.js | 93 +-
.../tools/structured/TavilySearchResults.js | 92 +
.../tools/structured/TraversaalSearch.js | 89 +
api/app/clients/tools/structured/Wolfram.js | 5 +-
.../tools/structured/specs/DALLE3.spec.js | 14 +-
api/app/clients/tools/util/handleTools.js | 127 +-
.../clients/tools/util/handleTools.test.js | 121 +-
api/app/clients/tools/util/loadToolSuite.js | 44 +-
api/cache/getLogStores.js | 28 +-
api/config/parsers.js | 4 +
api/config/paths.js | 7 +
api/config/winston.js | 28 +-
api/models/Action.js | 68 +
api/models/Assistant.js | 47 +
api/models/Balance.js | 4 +-
api/models/Conversation.js | 6 +-
api/models/File.js | 28 +-
api/models/Message.js | 40 +-
api/models/Transaction.js | 46 +-
api/models/checkBalance.js | 6 +-
api/models/index.js | 9 +-
api/models/plugins/mongoMeili.js | 9 +
api/models/schema/action.js | 59 +
api/models/schema/assistant.js | 33 +
api/models/schema/convoSchema.js | 2 +-
api/models/schema/defaults.js | 41 +-
api/models/schema/fileSchema.js | 17 +
api/models/schema/messageSchema.js | 11 +-
api/models/spendTokens.js | 23 +-
api/models/tx.js | 28 +-
api/models/tx.spec.js | 9 +
api/models/userMethods.js | 46 +
api/package.json | 25 +-
api/server/controllers/AskController.js | 59 +-
api/server/controllers/AuthController.js | 10 +-
api/server/controllers/EditController.js | 53 +-
api/server/controllers/EndpointController.js | 21 +-
api/server/controllers/ErrorController.js | 15 +-
api/server/controllers/ModelController.js | 31 +-
api/server/controllers/PluginController.js | 84 +-
api/server/controllers/UserController.js | 15 +-
api/server/index.js | 17 +-
api/server/middleware/abortMiddleware.js | 13 +-
api/server/middleware/abortRun.js | 92 +
api/server/middleware/buildEndpointOption.js | 11 +-
api/server/middleware/denyRequest.js | 7 +-
api/server/middleware/index.js | 6 +
api/server/middleware/moderateText.js | 3 +-
api/server/middleware/uploadLimiters.js | 75 +
api/server/middleware/validateImageRequest.js | 37 +
api/server/middleware/validateModel.js | 47 +
api/server/routes/__tests__/config.spec.js | 9 +-
api/server/routes/ask/anthropic.js | 16 +-
api/server/routes/ask/askChatGPTBrowser.js | 5 +-
api/server/routes/ask/bingAI.js | 7 +-
api/server/routes/ask/custom.js | 14 +-
api/server/routes/ask/google.js | 14 +-
api/server/routes/ask/gptPlugins.js | 348 +-
api/server/routes/ask/openAI.js | 14 +-
api/server/routes/assistants/actions.js | 202 +
api/server/routes/assistants/assistants.js | 199 +-
api/server/routes/assistants/chat.js | 696 +-
api/server/routes/assistants/tools.js | 8 +
api/server/routes/config.js | 21 +-
api/server/routes/convos.js | 67 +-
api/server/routes/edit/anthropic.js | 14 +-
api/server/routes/edit/custom.js | 14 +-
api/server/routes/edit/google.js | 14 +-
api/server/routes/edit/gptPlugins.js | 313 +-
api/server/routes/edit/openAI.js | 14 +-
api/server/routes/files/avatar.js | 32 +-
api/server/routes/files/files.js | 149 +-
api/server/routes/files/images.js | 48 +-
api/server/routes/files/index.js | 35 +-
api/server/routes/files/multer.js | 34 +-
api/server/routes/index.js | 2 +
api/server/routes/messages.js | 2 +-
api/server/routes/models.js | 8 +-
api/server/routes/oauth.js | 2 +
api/server/routes/presets.js | 17 +-
api/server/routes/static.js | 7 +
api/server/routes/tokenizer.js | 2 +-
api/server/services/ActionService.js | 148 +
api/server/services/AppService.js | 183 +-
api/server/services/AppService.spec.js | 436 +
api/server/services/AssistantService.js | 677 +-
api/server/services/AuthService.js | 36 +-
api/server/services/AuthService.spec.js | 39 +
api/server/services/Config/EndpointService.js | 25 +-
.../services/Config}/getCustomConfig.js | 8 +-
.../services/Config/handleRateLimits.js | 22 +
api/server/services/Config/index.js | 2 +
.../services/Config/loadAsyncEndpoints.js | 25 +-
.../services/Config/loadConfigEndpoints.js | 34 +-
.../services/Config/loadConfigModels.js | 76 +-
.../services/Config/loadConfigModels.spec.js | 329 +
.../services/Config/loadCustomConfig.js | 57 +-
.../services/Config/loadCustomConfig.spec.js | 153 +
.../services/Config/loadDefaultEConfig.js | 29 +-
.../services/Config/loadDefaultModels.js | 25 +-
.../services/Endpoints/anthropic/addTitle.js | 32 +
.../Endpoints/anthropic/buildOptions.js | 3 +-
.../services/Endpoints/anthropic/index.js | 3 +-
.../services/Endpoints/assistants/addTitle.js | 28 +
.../Endpoints/assistants/buildOptions.js | 16 +
.../services/Endpoints/assistants/index.js | 96 +
.../Endpoints/assistants/initializeClient.js | 148 +
.../assistants/initializeClient.spec.js | 99 +
.../services/Endpoints/custom/buildOptions.js | 4 +-
.../Endpoints/custom/initializeClient.js | 74 +-
.../Endpoints/google/initializeClient.spec.js | 2 +
.../Endpoints/gptPlugins/initializeClient.js | 97 +-
.../gptPlugins/initializeClient.spec.js | 183 +-
.../services/Endpoints/openAI/addTitle.js | 8 +-
.../services/Endpoints/openAI/buildOptions.js | 4 +-
.../Endpoints/openAI/initializeClient.js | 86 +-
.../Endpoints/openAI/initializeClient.spec.js | 198 +-
api/server/services/Files/Firebase/crud.js | 96 +-
api/server/services/Files/Firebase/images.js | 32 +-
api/server/services/Files/Local/crud.js | 150 +-
api/server/services/Files/Local/images.js | 39 +-
api/server/services/Files/OpenAI/crud.js | 79 +
api/server/services/Files/OpenAI/index.js | 5 +
api/server/services/Files/VectorDB/crud.js | 102 +
api/server/services/Files/VectorDB/index.js | 5 +
api/server/services/Files/images/avatar.js | 28 +-
api/server/services/Files/images/convert.js | 70 +
api/server/services/Files/images/encode.js | 85 +-
api/server/services/Files/images/index.js | 4 +-
api/server/services/Files/images/resize.js | 40 +-
api/server/services/Files/images/validate.js | 13 -
api/server/services/Files/process.js | 508 +-
api/server/services/Files/strategies.js | 75 +-
api/server/services/ModelService.js | 166 +-
api/server/services/ModelService.spec.js | 258 +
api/server/services/PluginService.js | 3 +-
.../Runs/{RunMananger.js => RunManager.js} | 73 +-
api/server/services/Runs/StreamRunManager.js | 619 +
api/server/services/Runs/handle.js | 264 +
api/server/services/Runs/index.js | 11 +
api/server/services/Runs/methods.js | 63 +
api/server/services/Threads/index.js | 5 +
api/server/services/Threads/manage.js | 657 +
api/server/services/ToolService.js | 356 +
api/server/utils/countTokens.js | 14 +
api/server/utils/crypto.js | 25 +-
.../utils/emails/passwordReset.handlebars | 193 +-
.../emails/requestPasswordReset.handlebars | 248 +-
.../utils/emails/verifyEmail.handlebars | 239 +
api/server/utils/files.js | 47 +
api/server/utils/handleText.js | 66 +-
api/server/utils/handleText.spec.js | 49 +-
api/server/utils/index.js | 4 +
api/server/utils/queue.js | 69 +
api/server/utils/sendEmail.js | 3 +-
api/server/utils/streamResponse.js | 38 +-
api/strategies/localStrategy.js | 7 +-
api/strategies/openidStrategy.js | 35 +-
api/strategies/process.js | 25 +-
api/strategies/validators.js | 28 +-
api/strategies/validators.spec.js | 7 +-
api/typedefs.js | 778 +-
api/utils/azureUtils.js | 15 +-
api/utils/azureUtils.spec.js | 14 +-
api/utils/extractBaseURL.js | 12 +-
api/utils/index.js | 6 +-
api/utils/logAxiosError.js | 45 +
api/utils/tokens.js | 138 +-
api/utils/tokens.spec.js | 90 +
bun.lockb | Bin 857707 -> 885114 bytes
client/index.html | 2 +-
client/nginx.conf | 6 +-
client/package.json | 17 +-
client/public/assets/ShuttleAI_Fibonacci.png | Bin 0 -> 30879 bytes
client/public/assets/anyscale.png | Bin 0 -> 70768 bytes
client/public/assets/cohere.png | Bin 0 -> 26469 bytes
client/public/assets/fireworks.png | Bin 0 -> 223682 bytes
client/public/assets/groq.png | Bin 0 -> 23360 bytes
client/public/assets/mistral.png | Bin 548 -> 4799 bytes
client/public/assets/ollama.png | Bin 0 -> 39717 bytes
client/public/assets/perplexity.png | Bin 0 -> 14309 bytes
client/public/assets/shuttleai.png | Bin 0 -> 261079 bytes
client/public/assets/together.png | Bin 0 -> 20985 bytes
client/public/fonts/Inter-Bold.woff2 | Bin 0 -> 111040 bytes
client/public/fonts/Inter-BoldItalic.woff2 | Bin 0 -> 118392 bytes
client/public/fonts/Inter-Italic.woff2 | Bin 0 -> 114576 bytes
client/public/fonts/Inter-Regular.woff2 | Bin 0 -> 108488 bytes
client/public/fonts/Inter-SemiBold.woff2 | Bin 0 -> 111588 bytes
.../public/fonts/Inter-SemiBoldItalic.woff2 | Bin 0 -> 118216 bytes
.../fonts/roboto-mono-latin-400-italic.woff2 | Bin 0 -> 14052 bytes
.../fonts/roboto-mono-latin-400-normal.woff2 | Bin 0 -> 12764 bytes
.../fonts/roboto-mono-latin-700-normal.woff2 | Bin 0 -> 12740 bytes
.../public/fonts/signifier-bold-italic.woff2 | Bin 56682 -> 0 bytes
client/public/fonts/signifier-bold.woff2 | Bin 56021 -> 0 bytes
.../public/fonts/signifier-light-italic.woff2 | Bin 56456 -> 0 bytes
client/public/fonts/signifier-light.woff2 | Bin 53009 -> 0 bytes
client/public/fonts/soehne-buch-kursiv.woff2 | Bin 34084 -> 0 bytes
client/public/fonts/soehne-buch.woff2 | Bin 33350 -> 0 bytes
.../public/fonts/soehne-halbfett-kursiv.woff2 | Bin 40456 -> 0 bytes
client/public/fonts/soehne-halbfett.woff2 | Bin 37996 -> 0 bytes
.../public/fonts/soehne-kraftig-kursiv.woff2 | Bin 38746 -> 0 bytes
client/public/fonts/soehne-kraftig.woff2 | Bin 35690 -> 0 bytes
.../fonts/soehne-mono-buch-kursiv.woff2 | Bin 28148 -> 0 bytes
client/public/fonts/soehne-mono-buch.woff2 | Bin 27437 -> 0 bytes
.../public/fonts/soehne-mono-halbfett.woff2 | Bin 28285 -> 0 bytes
client/src/App.jsx | 16 +-
client/src/Providers/AssistantsContext.tsx | 18 +-
client/src/Providers/AssistantsMapContext.tsx | 8 +
client/src/Providers/FileMapContext.tsx | 6 +
client/src/Providers/index.ts | 2 +
client/src/Providers/useCreationForm.ts | 19 -
client/src/common/assistants-types.ts | 16 +-
client/src/common/types.ts | 118 +-
client/src/components/Auth/Login.tsx | 216 +-
client/src/components/Auth/LoginForm.tsx | 71 +-
client/src/components/Auth/Registration.tsx | 499 +-
.../components/Auth/RequestPasswordReset.tsx | 159 +-
client/src/components/Auth/ResetPassword.tsx | 41 +-
client/src/components/Auth/SocialButton.tsx | 60 +
.../components/Auth/__tests__/Login.spec.tsx | 23 +-
.../Auth/__tests__/Registration.spec.tsx | 24 +-
client/src/components/Chat/ChatView.tsx | 17 +-
client/src/components/Chat/CreationHeader.tsx | 113 -
client/src/components/Chat/CreationPanel.tsx | 229 -
client/src/components/Chat/Footer.tsx | 77 +-
client/src/components/Chat/Header.tsx | 6 +-
client/src/components/Chat/Input/ChatForm.tsx | 152 +-
.../Chat/Input/Files/AttachFile.tsx | 26 +-
.../Chat/Input/Files/DragDropOverlay.tsx | 2 +-
.../Chat/Input/Files/FileContainer.tsx | 34 +
.../Chat/Input/Files/FilePreview.tsx | 45 +
.../components/Chat/Input/Files/FileRow.tsx | 100 +
.../Input/Files}/FileUpload.tsx | 2 +-
.../components/Chat/Input/Files/FilesView.tsx | 43 +
.../src/components/Chat/Input/Files/Image.tsx | 96 +-
.../Chat/Input/Files/ImagePreview.tsx | 72 +
.../components/Chat/Input/Files/Images.tsx | 116 -
.../Chat/Input/Files/ProgressCircle.tsx | 36 +
.../Chat/Input/Files/RemoveFile.tsx | 25 +
.../Chat/Input/Files/Table/Columns.tsx | 201 +
.../Chat/Input/Files/Table/DataTable.tsx | 243 +
.../Input/Files/Table/SortFilterHeader.tsx | 118 +
.../Chat/Input/Files/Table/TemplateTable.tsx | 88 +
.../Chat/Input/Files/Table/fakeData.ts | 76 +
.../Chat/Input/Files/Table/index.ts | 4 +
.../components/Chat/Input/HeaderOptions.tsx | 9 +-
.../src/components/Chat/Input/OptionsBar.tsx | 165 -
.../components/Chat/Input/OptionsPopover.tsx | 8 +-
.../components/Chat/Input/PopoverButtons.tsx | 115 +-
.../src/components/Chat/Input/SendButton.tsx | 64 +-
.../src/components/Chat/Input/StopButton.tsx | 4 +-
client/src/components/Chat/Input/Textarea.tsx | 41 -
client/src/components/Chat/Landing.tsx | 97 +-
.../components/Chat/Menus/Endpoints/Icons.tsx | 50 +-
.../Chat/Menus/Endpoints/MenuItem.tsx | 45 +-
.../Chat/Menus/Endpoints/UnknownIcon.tsx | 67 +-
.../components/Chat/Menus/EndpointsMenu.tsx | 21 +-
.../components/Chat/Menus/HeaderNewChat.tsx | 23 +
client/src/components/Chat/Menus/NewChat.tsx | 36 -
.../Chat/Menus/Presets/EditPresetDialog.tsx | 49 +-
.../Chat/Menus/Presets/PresetItems.tsx | 52 +-
.../src/components/Chat/Menus/PresetsMenu.tsx | 8 +-
.../src/components/Chat/Menus/UI/MenuItem.tsx | 47 +-
.../Chat/Menus/UI/MenuSeparator.tsx | 2 +-
.../components/Chat/Menus/UI/TitleButton.tsx | 2 +-
client/src/components/Chat/Menus/index.ts | 2 +-
.../Chat/Messages/Content/ActionIcon.tsx | 172 +
.../Chat/Messages/Content/CancelledIcon.tsx | 17 +
.../Chat/Messages/Content/CodeAnalyze.tsx | 152 +
.../Chat/Messages/Content/Container.tsx | 7 +-
.../Chat/Messages/Content/ContentParts.tsx | 50 +
.../Chat/Messages/Content/EditMessage.tsx | 89 +-
.../Chat/Messages/Content/Files.tsx | 39 +
.../Chat/Messages/Content/FinishedIcon.tsx | 18 +
.../Chat/Messages/Content/Image.tsx | 85 +-
.../Chat/Messages/Content/ImageGen.tsx | 141 +
.../Chat/Messages/Content/InProgressCall.tsx | 19 +
.../Chat/Messages/Content/Markdown.tsx | 147 +-
.../Chat/Messages/Content/MarkdownLite.tsx | 45 +
.../Chat/Messages/Content/MessageContent.tsx | 62 +-
.../components/Chat/Messages/Content/Part.tsx | 153 +
.../Chat/Messages/Content/ProgressCircle.tsx | 38 +
.../Chat/Messages/Content/ProgressText.tsx | 71 +
.../Chat/Messages/Content/RetrievalCall.tsx | 53 +
.../Chat/Messages/Content/RetrievalIcon.tsx | 80 +
.../Chat/Messages/Content/ToolCall.tsx | 79 +
.../Chat/Messages/Content/ToolPopover.tsx | 62 +
.../Chat/Messages/Content/WrenchIcon.tsx | 77 +
.../components/Chat/Messages/HoverButtons.tsx | 47 +-
.../src/components/Chat/Messages/Message.tsx | 7 +-
.../components/Chat/Messages/MessageParts.tsx | 124 +
.../components/Chat/Messages/MessagesView.tsx | 4 +-
.../components/Chat/Messages/MultiMessage.tsx | 16 +
client/src/components/Chat/Presentation.tsx | 61 +-
client/src/components/Chat/SearchView.tsx | 22 +
client/src/components/Chat/SingleChatView.tsx | 41 -
.../components/Conversations/Conversation.jsx | 8 +-
.../Conversations/Conversations.tsx | 77 +-
client/src/components/Conversations/Convo.tsx | 61 +-
.../components/Conversations/DeleteButton.tsx | 23 +-
.../Conversations/NewDeleteButton.tsx | 59 -
.../components/Conversations/RenameButton.tsx | 8 +-
client/src/components/Conversations/index.ts | 1 -
.../Endpoints/AlternativeSettings.tsx | 24 +
.../components/Endpoints/EditPresetDialog.tsx | 145 -
.../Endpoints/EndpointOptionsDialog.tsx | 113 -
.../Endpoints/EndpointOptionsPopover.tsx | 6 +-
.../components/Endpoints/EndpointSettings.tsx | 9 +-
client/src/components/Endpoints/Icon.tsx | 269 +-
.../src/components/Endpoints/MinimalIcon.tsx | 10 +-
.../components/Endpoints/PopoverButtons.tsx | 80 -
.../Endpoints/SaveAsPresetDialog.tsx | 7 +-
.../Endpoints/Settings/Advanced.tsx | 334 +
.../Endpoints/Settings/AgentSettings.tsx | 4 +-
.../Endpoints/Settings/Anthropic.tsx | 29 +-
.../Endpoints/Settings/Assistants.tsx | 177 +
.../Endpoints/Settings/Examples.tsx | 4 +-
.../components/Endpoints/Settings/OpenAI.tsx | 92 +-
.../Endpoints/Settings/OptionHover.tsx | 5 +-
.../components/Endpoints/Settings/index.ts | 2 +
.../components/Endpoints/Settings/settings.ts | 2 +
client/src/components/Endpoints/index.ts | 4 +-
.../Input/EndpointMenu/EndpointItem.tsx | 80 -
.../Input/EndpointMenu/EndpointItems.tsx | 22 -
.../Input/EndpointMenu/EndpointMenu.jsx | 280 -
.../Input/EndpointMenu/PresetItem.tsx | 101 -
.../Input/EndpointMenu/PresetItems.tsx | 20 -
.../components/Input/EndpointMenu/index.ts | 1 -
client/src/components/Input/Footer.tsx | 10 +-
.../components/Input/ModelSelect/BingAI.tsx | 4 +-
.../Input/ModelSelect/ModelSelect.tsx | 8 +-
.../components/Input/ModelSelect/Plugins.tsx | 10 +-
.../Input/ModelSelect/PluginsByIndex.tsx | 6 +-
client/src/components/Input/OptionsBar.tsx | 155 -
.../Input/SetKeyDialog/GoogleConfig.tsx | 4 +-
.../Input/SetKeyDialog/HelpText.tsx | 8 +-
.../Input/SetKeyDialog/InputWithLabel.tsx | 4 +-
.../Input/SetKeyDialog/OpenAIConfig.tsx | 180 +-
.../Input/SetKeyDialog/SetKeyDialog.tsx | 62 +-
client/src/components/Input/SubmitButton.tsx | 137 -
client/src/components/Input/TextChat.tsx | 212 -
.../components/Messages/Content/CodeBlock.tsx | 4 +-
.../src/components/Messages/Content/Error.tsx | 5 +-
.../Messages/Content/MessageContent.tsx | 8 +-
.../components/Messages/Content/Plugin.tsx | 8 +-
.../src/components/Messages/HoverButtons.tsx | 10 +-
client/src/components/Messages/Message.tsx | 6 +-
.../src/components/Messages/MessageHeader.tsx | 113 -
client/src/components/Messages/Messages.tsx | 124 -
.../components/Messages/ScrollToBottom.tsx | 26 +-
client/src/components/Nav/ClearConvos.tsx | 2 +-
.../Nav/ExportConversation/ExportModal.jsx | 4 +-
client/src/components/Nav/Logout.tsx | 2 +-
client/src/components/Nav/MobileNav.tsx | 13 +-
client/src/components/Nav/Nav.tsx | 219 +-
client/src/components/Nav/NavLinks.tsx | 93 +-
client/src/components/Nav/NavToggle.tsx | 39 +-
client/src/components/Nav/NewChat.tsx | 98 +-
client/src/components/Nav/SearchBar.tsx | 10 +-
client/src/components/Nav/Settings.tsx | 44 +-
.../Nav/SettingsTabs/Account/Account.tsx | 2 +-
.../Nav/SettingsTabs/Account/Avatar.tsx | 18 +-
.../components/Nav/SettingsTabs/Beta/Beta.tsx | 2 +-
.../components/Nav/SettingsTabs/Data/Data.tsx | 2 +-
.../Nav/SettingsTabs/General/General.tsx | 19 +-
.../General/HideSidePanelSwitch.tsx | 33 +
.../SettingsTabs/General/ShowCodeSwitch.tsx | 33 +
.../Plugins/Store/PluginAuthForm.tsx | 89 +-
.../Plugins/Store/PluginPagination.tsx | 3 +
.../Plugins/Store/PluginStoreDialog.tsx | 134 +-
.../Plugins/Store/PluginStoreItem.tsx | 16 +-
.../Store/__tests__/PluginAuthForm.spec.tsx | 1 +
.../Store/__tests__/PluginPagination.spec.tsx | 2 +-
.../Store/__tests__/PluginStoreItem.spec.tsx | 2 +-
.../SidePanel/AssistantSwitcher.tsx | 84 +
.../SidePanel/Builder/ActionsAuth.tsx | 296 +
.../SidePanel/Builder/ActionsInput.tsx | 287 +
.../SidePanel/Builder/ActionsPanel.tsx | 193 +
.../Builder/ActionsTable/Columns.tsx | 54 +
.../SidePanel/Builder/ActionsTable/Table.tsx | 47 +
.../SidePanel/Builder/ActionsTable/index.ts | 2 +
.../SidePanel/Builder/AssistantAction.tsx | 33 +
.../SidePanel/Builder/AssistantAvatar.tsx | 211 +
.../SidePanel/Builder/AssistantPanel.tsx | 561 +
.../SidePanel/Builder/AssistantSelect.tsx | 210 +
.../SidePanel/Builder/AssistantTool.tsx | 52 +
.../SidePanel/Builder/ContextButton.tsx | 149 +
.../components/SidePanel/Builder/Images.tsx | 133 +
.../SidePanel/Builder/Knowledge.tsx | 128 +
.../SidePanel/Builder/PanelSwitch.tsx | 51 +
.../src/components/SidePanel/Files/Panel.tsx | 14 +
.../SidePanel/Files/PanelColumns.tsx | 47 +
.../SidePanel/Files/PanelFileCell.tsx | 101 +
.../components/SidePanel/Files/PanelTable.tsx | 167 +
.../components/SidePanel/ModelSwitcher.tsx | 54 +
client/src/components/SidePanel/Nav.tsx | 128 +
.../SidePanel/Parameters/DynamicCheckbox.tsx | 97 +
.../SidePanel/Parameters/DynamicDropdown.tsx | 106 +
.../SidePanel/Parameters/DynamicInput.tsx | 93 +
.../SidePanel/Parameters/DynamicSlider.tsx | 175 +
.../SidePanel/Parameters/DynamicSwitch.tsx | 94 +
.../SidePanel/Parameters/DynamicTextarea.tsx | 97 +
.../SidePanel/Parameters/OptionHover.tsx | 26 +
.../components/SidePanel/Parameters/Panel.tsx | 215 +
client/src/components/SidePanel/SidePanel.tsx | 223 +
client/src/components/SidePanel/Switcher.tsx | 14 +
client/src/components/SidePanel/data.tsx | 38 +
client/src/components/SidePanel/index.ts | 2 +
client/src/components/Tools/ToolItem.tsx | 71 +
.../src/components/Tools/ToolSelectDialog.tsx | 240 +
client/src/components/Tools/index.ts | 2 +
client/src/components/svg/AssistantIcon.tsx | 31 +
client/src/components/svg/BirthdayIcon.tsx | 64 +
client/src/components/svg/Blocks.tsx | 20 +
client/src/components/svg/CheckMark.tsx | 8 +-
client/src/components/svg/Clipboard.tsx | 9 +-
client/src/components/svg/DataIcon.tsx | 4 +-
client/src/components/svg/EditIcon.tsx | 11 +-
client/src/components/svg/Files/CodePaths.tsx | 20 +
client/src/components/svg/Files/FileIcon.tsx | 28 +
client/src/components/svg/Files/FilePaths.tsx | 20 +
.../src/components/svg/Files/SheetPaths.tsx | 13 +
client/src/components/svg/Files/TextPaths.tsx | 41 +
client/src/components/svg/GoogleIconChat.tsx | 26 +
client/src/components/svg/NewChatIcon.tsx | 20 +
client/src/components/svg/NewTrashIcon.tsx | 19 +
.../src/components/svg/OpenAIMinimalIcon.tsx | 6 +-
.../src/components/svg/PluginMinimalIcon.tsx | 21 -
client/src/components/svg/RegenerateIcon.tsx | 10 +-
client/src/components/svg/RenameIcon.tsx | 9 +-
client/src/components/svg/Sparkles.tsx | 25 +
client/src/components/svg/Spinner.tsx | 7 +-
client/src/components/svg/TrashIcon.tsx | 13 +-
client/src/components/svg/index.ts | 9 +-
client/src/components/ui/Accordion.tsx | 51 +
client/src/components/ui/AlertDialog.tsx | 10 +-
client/src/components/ui/Button.tsx | 14 +-
client/src/components/ui/Checkbox.tsx | 2 +-
client/src/components/ui/Collapsible.tsx | 9 +
client/src/components/ui/Combobox.tsx | 169 +
.../components/ui/DataTableColumnHeader.tsx | 61 +
client/src/components/ui/Dialog.tsx | 67 +-
client/src/components/ui/DialogTemplate.tsx | 31 +-
client/src/components/ui/Dropdown.tsx | 37 +-
client/src/components/ui/DropdownMenu.tsx | 18 +-
client/src/components/ui/HoverCard.tsx | 2 +-
client/src/components/ui/Input.tsx | 2 +-
client/src/components/ui/InputNumber.tsx | 4 +-
client/src/components/ui/Landing.tsx | 12 +-
client/src/components/ui/MultiSearch.tsx | 132 +
.../src/components/ui/MultiSelectDropDown.tsx | 32 +-
client/src/components/ui/MultiSelectPop.tsx | 36 +-
client/src/components/ui/Prompt.tsx | 2 +-
client/src/components/ui/QuestionMark.tsx | 15 +
client/src/components/ui/Resizable.tsx | 62 +
client/src/components/ui/Select.tsx | 162 +
client/src/components/ui/SelectDropDown.tsx | 34 +-
.../src/components/ui/SelectDropDownPop.tsx | 24 +-
client/src/components/ui/Slider.tsx | 4 +-
client/src/components/ui/Switch.tsx | 4 +-
client/src/components/ui/Table.tsx | 89 +
client/src/components/ui/Tabs.tsx | 4 +-
client/src/components/ui/Templates.tsx | 12 +-
client/src/components/ui/Textarea.tsx | 2 +-
client/src/components/ui/TextareaAutosize.tsx | 10 +
client/src/components/ui/ThemeSelector.tsx | 39 +
client/src/components/ui/Tooltip.tsx | 3 +-
client/src/components/ui/index.ts | 7 +
client/src/data-provider/mutations.ts | 437 +-
client/src/data-provider/queries.ts | 325 +-
client/src/hooks/Assistants/index.ts | 2 +
.../src/hooks/Assistants/useAssistantsMap.ts | 12 +
.../hooks/Assistants/useSelectAssistant.ts | 51 +
client/src/hooks/Config/useConfigOverride.ts | 4 +-
client/src/hooks/Conversations/index.ts | 2 +
.../hooks/Conversations/useDebouncedInput.ts | 54 +
.../Conversations/useParameterEffects.ts | 68 +
client/src/hooks/Conversations/usePresets.ts | 37 +-
client/src/hooks/Files/index.ts | 7 +
.../src/hooks/Files/useDelayedUploadToast.ts | 34 +
.../hooks/Files/useDeleteFilesFromTable.tsx | 42 +
.../src/hooks/{ => Files}/useDragHelpers.ts | 0
client/src/hooks/Files/useFileDeletion.ts | 128 +
client/src/hooks/Files/useFileHandling.ts | 310 +
client/src/hooks/Files/useFileMap.ts | 11 +
.../hooks/{ => Files}/useSetFilesToDelete.ts | 0
client/src/hooks/Files/useUpdateFiles.ts | 72 +
client/src/hooks/Input/index.ts | 1 +
client/src/hooks/Input/useCombobox.ts | 37 +
client/src/hooks/Input/useTextarea.ts | 161 +-
client/src/hooks/Messages/index.ts | 2 +
client/src/hooks/Messages/useAvatar.ts | 50 +
.../src/hooks/Messages/useMessageHelpers.ts | 65 +-
client/src/hooks/Messages/useProgress.ts | 36 +
client/src/hooks/Nav/index.ts | 1 +
client/src/hooks/Nav/useNavScrolling.ts | 64 +
client/src/hooks/Nav/useSideNavLinks.ts | 71 +
client/src/hooks/Plugins/index.ts | 1 +
.../hooks/Plugins/usePluginDialogHelpers.ts | 79 +
client/src/hooks/SSE/index.ts | 2 +
client/src/hooks/SSE/useContentHandler.ts | 83 +
client/src/hooks/SSE/useSSE.ts | 622 +
client/src/hooks/ScreenshotContext.tsx | 2 +-
client/src/hooks/ThemeContext.tsx | 13 +
client/src/hooks/index.ts | 13 +-
client/src/hooks/useChatHelpers.ts | 130 +-
client/src/hooks/useConversation.ts | 11 +-
client/src/hooks/useDefaultConvo.ts | 16 +-
client/src/hooks/useFileHandling.ts | 278 -
client/src/hooks/useFileHandlingResize.ts | 209 -
client/src/hooks/useGenerations.ts | 2 +-
client/src/hooks/useGenerationsByLatest.ts | 2 +-
client/src/hooks/useMessageHandler.ts | 5 +-
client/src/hooks/useNewConvo.ts | 72 +-
client/src/hooks/useOriginNavigate.ts | 2 +-
client/src/hooks/useSSE.ts | 458 -
client/src/hooks/useSetIndexOptions.ts | 14 +-
client/src/localization/Translation.spec.ts | 37 +
.../{Translation.tsx => Translation.ts} | 22 +-
.../localization/languages/{Ar.tsx => Ar.ts} | 2 +-
client/src/localization/languages/Br.ts | 437 +
client/src/localization/languages/Br.tsx | 319 -
client/src/localization/languages/De.ts | 332 +
client/src/localization/languages/De.tsx | 204 -
.../languages/{Eng.tsx => Eng.ts} | 131 +-
client/src/localization/languages/Es.ts | 443 +
client/src/localization/languages/Es.tsx | 322 -
.../localization/languages/{Fr.tsx => Fr.ts} | 3 +
client/src/localization/languages/He.ts | 360 +
.../localization/languages/{Id.tsx => Id.ts} | 3 +
.../localization/languages/{It.tsx => It.ts} | 11 +-
.../localization/languages/{Jp.tsx => Jp.ts} | 37 +-
.../localization/languages/{Ko.tsx => Ko.ts} | 1 +
.../localization/languages/{Nl.tsx => Nl.ts} | 1 +
.../localization/languages/{Pl.tsx => Pl.ts} | 3 +-
.../localization/languages/{Ru.tsx => Ru.ts} | 45 +-
client/src/localization/languages/Sv.ts | 261 +
client/src/localization/languages/Sv.tsx | 260 -
.../localization/languages/{Tr.tsx => Tr.ts} | 1 +
.../localization/languages/{Vi.tsx => Vi.ts} | 1 +
.../localization/languages/{Zh.tsx => Zh.ts} | 65 +-
.../{ZhTraditional.tsx => ZhTraditional.ts} | 1 +
client/src/mobile.css | 58 +-
client/src/routes/AssistantsRoute.tsx | 41 -
client/src/routes/Chat.tsx | 153 -
client/src/routes/ChatRoute.tsx | 42 +-
client/src/routes/Root.tsx | 60 +-
client/src/routes/Search.tsx | 8 +-
client/src/routes/index.tsx | 20 +-
client/src/store/endpoints.ts | 22 +-
client/src/store/families.ts | 16 +
client/src/store/index.ts | 2 -
client/src/store/models.ts | 33 -
client/src/store/settings.ts | 53 +
client/src/style.css | 503 +-
client/src/utils/buildTree.ts | 4 +-
client/src/utils/cleanupPreset.ts | 6 +-
client/src/utils/cn.ts | 7 +-
client/src/utils/convos.fakeData.ts | 568 +
client/src/utils/convos.spec.ts | 228 +
client/src/utils/convos.ts | 184 +
client/src/utils/endpoints.ts | 34 +
client/src/utils/files.ts | 161 +-
client/src/utils/index.ts | 15 +-
client/src/utils/map.ts | 23 +
client/src/utils/textarea.ts | 59 +
client/tailwind.config.cjs | 41 +-
client/vite.config.ts | 3 +
config/add-balance.js | 6 +-
config/update.js | 6 +-
config/user-stats.js | 54 +
deploy-compose.yml | 34 +-
docker-compose.override.yml.example | 109 +-
docker-compose.yml | 43 +-
docs/contributions/coding_conventions.md | 11 +-
.../contributions/documentation_guidelines.md | 133 +-
docs/contributions/how_to_contribute.md | 225 +-
docs/contributions/index.md | 2 +-
docs/contributions/testing.md | 29 +-
.../contributions/translation_contribution.md | 58 +-
docs/deployment/cloudflare.md | 28 +-
docs/deployment/digitalocean.md | 413 +-
docs/deployment/docker_ubuntu_deploy.md | 449 +
docs/deployment/heroku.md | 2 +-
docs/deployment/hetzner_ubuntu.md | 4 +-
docs/deployment/index.md | 32 +-
docs/deployment/introduction.md | 139 +
docs/deployment/linode.md | 6 +-
docs/deployment/meilisearch_in_render.md | 2 +-
docs/deployment/nginx.md | 312 +
docs/deployment/ngrok.md | 4 +-
docs/deployment/railway.md | 55 +
docs/deployment/render.md | 2 +-
docs/deployment/traefik.md | 91 +
docs/deployment/zeabur.md | 43 +
docs/features/index.md | 4 +-
docs/features/manage_your_database.md | 56 +-
docs/features/mod_system.md | 10 +-
docs/features/pandoranext.md | 172 -
.../plugins/chatgpt_plugins_openapi.md | 15 -
docs/features/plugins/google_search.md | 5 +-
docs/features/plugins/index.md | 2 +-
docs/features/presets.md | 4 +-
docs/features/rag_api.md | 149 +
docs/features/third_party.md | 22 +-
docs/features/token_usage.md | 36 +
docs/general_info/breaking_changes.md | 347 +-
docs/general_info/multilingual_information.md | 2 +-
docs/index.md | 30 +-
.../configuration/OAuth2-and-OIDC/aws.md | 114 +
.../configuration/OAuth2-and-OIDC/azure.md | 59 +
.../configuration/OAuth2-and-OIDC/discord.md | 49 +
.../configuration/OAuth2-and-OIDC/facebook.md | 83 +
.../configuration/OAuth2-and-OIDC/github.md | 65 +
.../configuration/OAuth2-and-OIDC/google.md | 97 +
.../configuration/OAuth2-and-OIDC/keycloak.md | 68 +
docs/install/configuration/ai_endpoints.md | 376 +
docs/install/configuration/ai_setup.md | 400 +-
docs/install/configuration/azure_openai.md | 678 +
docs/install/configuration/custom_config.md | 790 +-
docs/install/configuration/docker_override.md | 356 +-
docs/install/configuration/dotenv.md | 288 +-
docs/install/configuration/free_ai_apis.md | 4 +-
docs/install/configuration/index.md | 5 +-
docs/install/configuration/litellm.md | 313 +-
docs/install/configuration/mongodb.md | 2 +-
docs/install/configuration/ollama.md | 29 +
.../install/configuration/user_auth_system.md | 417 +-
docs/install/index.md | 5 +-
.../install/installation/container_install.md | 2 +-
.../installation/docker_compose_install.md | 76 +-
docs/install/installation/linux_install.md | 25 +-
docs/install/installation/mac_install.md | 8 +-
docs/install/installation/windows_install.md | 4 +-
docs/src/requirements.txt | 4 +
e2e/jestSetup.js | 2 +-
index.html | 4 +-
librechat.example.yaml | 129 +-
mkdocs.yml | 14 +-
package-lock.json | 10927 +++++++++-------
package.json | 10 +-
packages/data-provider/package.json | 7 +-
packages/data-provider/specs/actions.spec.ts | 508 +
packages/data-provider/specs/azure.spec.ts | 841 ++
.../data-provider/specs/filetypes.spec.ts | 181 +
packages/data-provider/specs/generate.spec.ts | 586 +
packages/data-provider/specs/openapiSpecs.ts | 350 +
packages/data-provider/specs/parsers.spec.ts | 48 +
packages/data-provider/src/actions.ts | 355 +
packages/data-provider/src/api-endpoints.ts | 17 +-
packages/data-provider/src/azure.ts | 361 +
packages/data-provider/src/config.ts | 410 +-
packages/data-provider/src/createPayload.ts | 6 +-
packages/data-provider/src/data-service.ts | 152 +-
packages/data-provider/src/file-config.ts | 265 +
packages/data-provider/src/generate.ts | 474 +
packages/data-provider/src/index.ts | 5 +
packages/data-provider/src/keys.ts | 11 +-
packages/data-provider/src/parsers.ts | 118 +-
.../src/react-query/assistants.ts | 138 -
.../data-provider/src/react-query/index.ts | 1 -
.../src/react-query/react-query-service.ts | 110 +-
packages/data-provider/src/request.ts | 5 +
packages/data-provider/src/schemas.ts | 301 +-
packages/data-provider/src/types.ts | 37 +-
.../data-provider/src/types/assistants.ts | 275 +-
packages/data-provider/src/types/files.ts | 67 +-
packages/data-provider/src/types/mutations.ts | 104 +-
packages/data-provider/src/types/queries.ts | 34 +
packages/data-provider/tsconfig.json | 2 +-
packages/data-provider/tsconfig.spec.json | 10 +
prettier.config.js | 2 +-
rag.yml | 31 +
utils/docker/docker-build.sh | 21 +
utils/docker/docker-push.sh | 31 +
utils/docker/test-compose.yml | 66 +
723 files changed, 49974 insertions(+), 16749 deletions(-)
create mode 100644 .devcontainer/Dockerfile
delete mode 100644 .github/workflows/container.yml
create mode 100644 .github/workflows/generate_embeddings.yml
delete mode 100644 .github/workflows/latest-images-main.yml
create mode 100644 .github/workflows/main-image-workflow.yml
create mode 100644 .github/workflows/tag-images.yml
create mode 100644 api/app/clients/llm/createCoherePayload.js
create mode 100644 api/app/clients/prompts/createContextHandlers.js
create mode 100644 api/app/clients/prompts/createVisionPrompt.js
delete mode 100644 api/app/clients/tools/GoogleSearch.js
create mode 100644 api/app/clients/tools/structured/GoogleSearch.js
create mode 100644 api/app/clients/tools/structured/TavilySearchResults.js
create mode 100644 api/app/clients/tools/structured/TraversaalSearch.js
create mode 100644 api/models/Action.js
create mode 100644 api/models/Assistant.js
create mode 100644 api/models/schema/action.js
create mode 100644 api/models/schema/assistant.js
create mode 100644 api/models/userMethods.js
create mode 100644 api/server/middleware/abortRun.js
create mode 100644 api/server/middleware/uploadLimiters.js
create mode 100644 api/server/middleware/validateImageRequest.js
create mode 100644 api/server/middleware/validateModel.js
create mode 100644 api/server/routes/assistants/actions.js
create mode 100644 api/server/routes/assistants/tools.js
create mode 100644 api/server/routes/static.js
create mode 100644 api/server/services/ActionService.js
create mode 100644 api/server/services/AppService.spec.js
create mode 100644 api/server/services/AuthService.spec.js
rename api/{cache => server/services/Config}/getCustomConfig.js (69%)
create mode 100644 api/server/services/Config/handleRateLimits.js
create mode 100644 api/server/services/Config/loadConfigModels.spec.js
create mode 100644 api/server/services/Config/loadCustomConfig.spec.js
create mode 100644 api/server/services/Endpoints/anthropic/addTitle.js
create mode 100644 api/server/services/Endpoints/assistants/addTitle.js
create mode 100644 api/server/services/Endpoints/assistants/buildOptions.js
create mode 100644 api/server/services/Endpoints/assistants/index.js
create mode 100644 api/server/services/Endpoints/assistants/initializeClient.js
create mode 100644 api/server/services/Endpoints/assistants/initializeClient.spec.js
create mode 100644 api/server/services/Files/OpenAI/crud.js
create mode 100644 api/server/services/Files/OpenAI/index.js
create mode 100644 api/server/services/Files/VectorDB/crud.js
create mode 100644 api/server/services/Files/VectorDB/index.js
create mode 100644 api/server/services/Files/images/convert.js
delete mode 100644 api/server/services/Files/images/validate.js
create mode 100644 api/server/services/ModelService.spec.js
rename api/server/services/Runs/{RunMananger.js => RunManager.js} (59%)
create mode 100644 api/server/services/Runs/StreamRunManager.js
create mode 100644 api/server/services/Runs/handle.js
create mode 100644 api/server/services/Runs/index.js
create mode 100644 api/server/services/Runs/methods.js
create mode 100644 api/server/services/Threads/index.js
create mode 100644 api/server/services/Threads/manage.js
create mode 100644 api/server/services/ToolService.js
create mode 100644 api/server/utils/emails/verifyEmail.handlebars
create mode 100644 api/server/utils/files.js
create mode 100644 api/server/utils/queue.js
create mode 100644 api/utils/logAxiosError.js
create mode 100644 client/public/assets/ShuttleAI_Fibonacci.png
create mode 100644 client/public/assets/anyscale.png
create mode 100644 client/public/assets/cohere.png
create mode 100644 client/public/assets/fireworks.png
create mode 100644 client/public/assets/groq.png
create mode 100644 client/public/assets/ollama.png
create mode 100644 client/public/assets/perplexity.png
create mode 100644 client/public/assets/shuttleai.png
create mode 100644 client/public/assets/together.png
create mode 100644 client/public/fonts/Inter-Bold.woff2
create mode 100644 client/public/fonts/Inter-BoldItalic.woff2
create mode 100644 client/public/fonts/Inter-Italic.woff2
create mode 100644 client/public/fonts/Inter-Regular.woff2
create mode 100644 client/public/fonts/Inter-SemiBold.woff2
create mode 100644 client/public/fonts/Inter-SemiBoldItalic.woff2
create mode 100644 client/public/fonts/roboto-mono-latin-400-italic.woff2
create mode 100644 client/public/fonts/roboto-mono-latin-400-normal.woff2
create mode 100644 client/public/fonts/roboto-mono-latin-700-normal.woff2
delete mode 100644 client/public/fonts/signifier-bold-italic.woff2
delete mode 100644 client/public/fonts/signifier-bold.woff2
delete mode 100644 client/public/fonts/signifier-light-italic.woff2
delete mode 100644 client/public/fonts/signifier-light.woff2
delete mode 100644 client/public/fonts/soehne-buch-kursiv.woff2
delete mode 100644 client/public/fonts/soehne-buch.woff2
delete mode 100644 client/public/fonts/soehne-halbfett-kursiv.woff2
delete mode 100644 client/public/fonts/soehne-halbfett.woff2
delete mode 100644 client/public/fonts/soehne-kraftig-kursiv.woff2
delete mode 100644 client/public/fonts/soehne-kraftig.woff2
delete mode 100644 client/public/fonts/soehne-mono-buch-kursiv.woff2
delete mode 100644 client/public/fonts/soehne-mono-buch.woff2
delete mode 100644 client/public/fonts/soehne-mono-halbfett.woff2
create mode 100644 client/src/Providers/AssistantsMapContext.tsx
create mode 100644 client/src/Providers/FileMapContext.tsx
delete mode 100644 client/src/Providers/useCreationForm.ts
create mode 100644 client/src/components/Auth/SocialButton.tsx
delete mode 100644 client/src/components/Chat/CreationHeader.tsx
delete mode 100644 client/src/components/Chat/CreationPanel.tsx
create mode 100644 client/src/components/Chat/Input/Files/FileContainer.tsx
create mode 100644 client/src/components/Chat/Input/Files/FilePreview.tsx
create mode 100644 client/src/components/Chat/Input/Files/FileRow.tsx
rename client/src/components/{Input/EndpointMenu => Chat/Input/Files}/FileUpload.tsx (94%)
create mode 100644 client/src/components/Chat/Input/Files/FilesView.tsx
create mode 100644 client/src/components/Chat/Input/Files/ImagePreview.tsx
delete mode 100644 client/src/components/Chat/Input/Files/Images.tsx
create mode 100644 client/src/components/Chat/Input/Files/ProgressCircle.tsx
create mode 100644 client/src/components/Chat/Input/Files/RemoveFile.tsx
create mode 100644 client/src/components/Chat/Input/Files/Table/Columns.tsx
create mode 100644 client/src/components/Chat/Input/Files/Table/DataTable.tsx
create mode 100644 client/src/components/Chat/Input/Files/Table/SortFilterHeader.tsx
create mode 100644 client/src/components/Chat/Input/Files/Table/TemplateTable.tsx
create mode 100644 client/src/components/Chat/Input/Files/Table/fakeData.ts
create mode 100644 client/src/components/Chat/Input/Files/Table/index.ts
delete mode 100644 client/src/components/Chat/Input/OptionsBar.tsx
delete mode 100644 client/src/components/Chat/Input/Textarea.tsx
create mode 100644 client/src/components/Chat/Menus/HeaderNewChat.tsx
delete mode 100644 client/src/components/Chat/Menus/NewChat.tsx
create mode 100644 client/src/components/Chat/Messages/Content/ActionIcon.tsx
create mode 100644 client/src/components/Chat/Messages/Content/CancelledIcon.tsx
create mode 100644 client/src/components/Chat/Messages/Content/CodeAnalyze.tsx
create mode 100644 client/src/components/Chat/Messages/Content/ContentParts.tsx
create mode 100644 client/src/components/Chat/Messages/Content/Files.tsx
create mode 100644 client/src/components/Chat/Messages/Content/FinishedIcon.tsx
create mode 100644 client/src/components/Chat/Messages/Content/ImageGen.tsx
create mode 100644 client/src/components/Chat/Messages/Content/InProgressCall.tsx
create mode 100644 client/src/components/Chat/Messages/Content/MarkdownLite.tsx
create mode 100644 client/src/components/Chat/Messages/Content/Part.tsx
create mode 100644 client/src/components/Chat/Messages/Content/ProgressCircle.tsx
create mode 100644 client/src/components/Chat/Messages/Content/ProgressText.tsx
create mode 100644 client/src/components/Chat/Messages/Content/RetrievalCall.tsx
create mode 100644 client/src/components/Chat/Messages/Content/RetrievalIcon.tsx
create mode 100644 client/src/components/Chat/Messages/Content/ToolCall.tsx
create mode 100644 client/src/components/Chat/Messages/Content/ToolPopover.tsx
create mode 100644 client/src/components/Chat/Messages/Content/WrenchIcon.tsx
create mode 100644 client/src/components/Chat/Messages/MessageParts.tsx
create mode 100644 client/src/components/Chat/SearchView.tsx
delete mode 100644 client/src/components/Chat/SingleChatView.tsx
delete mode 100644 client/src/components/Conversations/NewDeleteButton.tsx
create mode 100644 client/src/components/Endpoints/AlternativeSettings.tsx
delete mode 100644 client/src/components/Endpoints/EditPresetDialog.tsx
delete mode 100644 client/src/components/Endpoints/EndpointOptionsDialog.tsx
delete mode 100644 client/src/components/Endpoints/PopoverButtons.tsx
create mode 100644 client/src/components/Endpoints/Settings/Advanced.tsx
create mode 100644 client/src/components/Endpoints/Settings/Assistants.tsx
delete mode 100644 client/src/components/Input/EndpointMenu/EndpointItem.tsx
delete mode 100644 client/src/components/Input/EndpointMenu/EndpointItems.tsx
delete mode 100644 client/src/components/Input/EndpointMenu/EndpointMenu.jsx
delete mode 100644 client/src/components/Input/EndpointMenu/PresetItem.tsx
delete mode 100644 client/src/components/Input/EndpointMenu/PresetItems.tsx
delete mode 100644 client/src/components/Input/EndpointMenu/index.ts
delete mode 100644 client/src/components/Input/OptionsBar.tsx
delete mode 100644 client/src/components/Input/SubmitButton.tsx
delete mode 100644 client/src/components/Input/TextChat.tsx
delete mode 100644 client/src/components/Messages/MessageHeader.tsx
delete mode 100644 client/src/components/Messages/Messages.tsx
create mode 100644 client/src/components/Nav/SettingsTabs/General/HideSidePanelSwitch.tsx
create mode 100644 client/src/components/Nav/SettingsTabs/General/ShowCodeSwitch.tsx
create mode 100644 client/src/components/SidePanel/AssistantSwitcher.tsx
create mode 100644 client/src/components/SidePanel/Builder/ActionsAuth.tsx
create mode 100644 client/src/components/SidePanel/Builder/ActionsInput.tsx
create mode 100644 client/src/components/SidePanel/Builder/ActionsPanel.tsx
create mode 100644 client/src/components/SidePanel/Builder/ActionsTable/Columns.tsx
create mode 100644 client/src/components/SidePanel/Builder/ActionsTable/Table.tsx
create mode 100644 client/src/components/SidePanel/Builder/ActionsTable/index.ts
create mode 100644 client/src/components/SidePanel/Builder/AssistantAction.tsx
create mode 100644 client/src/components/SidePanel/Builder/AssistantAvatar.tsx
create mode 100644 client/src/components/SidePanel/Builder/AssistantPanel.tsx
create mode 100644 client/src/components/SidePanel/Builder/AssistantSelect.tsx
create mode 100644 client/src/components/SidePanel/Builder/AssistantTool.tsx
create mode 100644 client/src/components/SidePanel/Builder/ContextButton.tsx
create mode 100644 client/src/components/SidePanel/Builder/Images.tsx
create mode 100644 client/src/components/SidePanel/Builder/Knowledge.tsx
create mode 100644 client/src/components/SidePanel/Builder/PanelSwitch.tsx
create mode 100644 client/src/components/SidePanel/Files/Panel.tsx
create mode 100644 client/src/components/SidePanel/Files/PanelColumns.tsx
create mode 100644 client/src/components/SidePanel/Files/PanelFileCell.tsx
create mode 100644 client/src/components/SidePanel/Files/PanelTable.tsx
create mode 100644 client/src/components/SidePanel/ModelSwitcher.tsx
create mode 100644 client/src/components/SidePanel/Nav.tsx
create mode 100644 client/src/components/SidePanel/Parameters/DynamicCheckbox.tsx
create mode 100644 client/src/components/SidePanel/Parameters/DynamicDropdown.tsx
create mode 100644 client/src/components/SidePanel/Parameters/DynamicInput.tsx
create mode 100644 client/src/components/SidePanel/Parameters/DynamicSlider.tsx
create mode 100644 client/src/components/SidePanel/Parameters/DynamicSwitch.tsx
create mode 100644 client/src/components/SidePanel/Parameters/DynamicTextarea.tsx
create mode 100644 client/src/components/SidePanel/Parameters/OptionHover.tsx
create mode 100644 client/src/components/SidePanel/Parameters/Panel.tsx
create mode 100644 client/src/components/SidePanel/SidePanel.tsx
create mode 100644 client/src/components/SidePanel/Switcher.tsx
create mode 100644 client/src/components/SidePanel/data.tsx
create mode 100644 client/src/components/SidePanel/index.ts
create mode 100644 client/src/components/Tools/ToolItem.tsx
create mode 100644 client/src/components/Tools/ToolSelectDialog.tsx
create mode 100644 client/src/components/Tools/index.ts
create mode 100644 client/src/components/svg/AssistantIcon.tsx
create mode 100644 client/src/components/svg/BirthdayIcon.tsx
create mode 100644 client/src/components/svg/Blocks.tsx
create mode 100644 client/src/components/svg/Files/CodePaths.tsx
create mode 100644 client/src/components/svg/Files/FileIcon.tsx
create mode 100644 client/src/components/svg/Files/FilePaths.tsx
create mode 100644 client/src/components/svg/Files/SheetPaths.tsx
create mode 100644 client/src/components/svg/Files/TextPaths.tsx
create mode 100644 client/src/components/svg/GoogleIconChat.tsx
create mode 100644 client/src/components/svg/NewChatIcon.tsx
create mode 100644 client/src/components/svg/NewTrashIcon.tsx
delete mode 100644 client/src/components/svg/PluginMinimalIcon.tsx
create mode 100644 client/src/components/svg/Sparkles.tsx
create mode 100644 client/src/components/ui/Accordion.tsx
create mode 100644 client/src/components/ui/Collapsible.tsx
create mode 100644 client/src/components/ui/Combobox.tsx
create mode 100644 client/src/components/ui/DataTableColumnHeader.tsx
create mode 100644 client/src/components/ui/MultiSearch.tsx
create mode 100644 client/src/components/ui/QuestionMark.tsx
create mode 100644 client/src/components/ui/Resizable.tsx
create mode 100644 client/src/components/ui/Select.tsx
create mode 100644 client/src/components/ui/Table.tsx
create mode 100644 client/src/components/ui/TextareaAutosize.tsx
create mode 100644 client/src/components/ui/ThemeSelector.tsx
create mode 100644 client/src/hooks/Assistants/index.ts
create mode 100644 client/src/hooks/Assistants/useAssistantsMap.ts
create mode 100644 client/src/hooks/Assistants/useSelectAssistant.ts
create mode 100644 client/src/hooks/Conversations/useDebouncedInput.ts
create mode 100644 client/src/hooks/Conversations/useParameterEffects.ts
create mode 100644 client/src/hooks/Files/index.ts
create mode 100644 client/src/hooks/Files/useDelayedUploadToast.ts
create mode 100644 client/src/hooks/Files/useDeleteFilesFromTable.tsx
rename client/src/hooks/{ => Files}/useDragHelpers.ts (100%)
create mode 100644 client/src/hooks/Files/useFileDeletion.ts
create mode 100644 client/src/hooks/Files/useFileHandling.ts
create mode 100644 client/src/hooks/Files/useFileMap.ts
rename client/src/hooks/{ => Files}/useSetFilesToDelete.ts (100%)
create mode 100644 client/src/hooks/Files/useUpdateFiles.ts
create mode 100644 client/src/hooks/Input/useCombobox.ts
create mode 100644 client/src/hooks/Messages/useAvatar.ts
create mode 100644 client/src/hooks/Messages/useProgress.ts
create mode 100644 client/src/hooks/Nav/index.ts
create mode 100644 client/src/hooks/Nav/useNavScrolling.ts
create mode 100644 client/src/hooks/Nav/useSideNavLinks.ts
create mode 100644 client/src/hooks/Plugins/index.ts
create mode 100644 client/src/hooks/Plugins/usePluginDialogHelpers.ts
create mode 100644 client/src/hooks/SSE/index.ts
create mode 100644 client/src/hooks/SSE/useContentHandler.ts
create mode 100644 client/src/hooks/SSE/useSSE.ts
delete mode 100644 client/src/hooks/useFileHandling.ts
delete mode 100644 client/src/hooks/useFileHandlingResize.ts
delete mode 100644 client/src/hooks/useSSE.ts
create mode 100644 client/src/localization/Translation.spec.ts
rename client/src/localization/{Translation.tsx => Translation.ts} (77%)
rename client/src/localization/languages/{Ar.tsx => Ar.ts} (99%)
create mode 100644 client/src/localization/languages/Br.ts
delete mode 100644 client/src/localization/languages/Br.tsx
create mode 100644 client/src/localization/languages/De.ts
delete mode 100644 client/src/localization/languages/De.tsx
rename client/src/localization/languages/{Eng.tsx => Eng.ts} (74%)
create mode 100644 client/src/localization/languages/Es.ts
delete mode 100644 client/src/localization/languages/Es.tsx
rename client/src/localization/languages/{Fr.tsx => Fr.ts} (98%)
create mode 100644 client/src/localization/languages/He.ts
rename client/src/localization/languages/{Id.tsx => Id.ts} (97%)
rename client/src/localization/languages/{It.tsx => It.ts} (97%)
rename client/src/localization/languages/{Jp.tsx => Jp.ts} (89%)
rename client/src/localization/languages/{Ko.tsx => Ko.ts} (99%)
rename client/src/localization/languages/{Nl.tsx => Nl.ts} (99%)
rename client/src/localization/languages/{Pl.tsx => Pl.ts} (99%)
rename client/src/localization/languages/{Ru.tsx => Ru.ts} (88%)
create mode 100644 client/src/localization/languages/Sv.ts
delete mode 100644 client/src/localization/languages/Sv.tsx
rename client/src/localization/languages/{Tr.tsx => Tr.ts} (99%)
rename client/src/localization/languages/{Vi.tsx => Vi.ts} (99%)
rename client/src/localization/languages/{Zh.tsx => Zh.ts} (81%)
rename client/src/localization/languages/{ZhTraditional.tsx => ZhTraditional.ts} (99%)
delete mode 100644 client/src/routes/AssistantsRoute.tsx
delete mode 100644 client/src/routes/Chat.tsx
delete mode 100644 client/src/store/models.ts
create mode 100644 client/src/utils/convos.fakeData.ts
create mode 100644 client/src/utils/convos.spec.ts
create mode 100644 client/src/utils/convos.ts
create mode 100644 client/src/utils/map.ts
create mode 100644 client/src/utils/textarea.ts
create mode 100644 config/user-stats.js
create mode 100644 docs/deployment/docker_ubuntu_deploy.md
create mode 100644 docs/deployment/introduction.md
create mode 100644 docs/deployment/nginx.md
create mode 100644 docs/deployment/railway.md
create mode 100644 docs/deployment/traefik.md
create mode 100644 docs/deployment/zeabur.md
delete mode 100644 docs/features/pandoranext.md
create mode 100644 docs/features/rag_api.md
create mode 100644 docs/install/configuration/OAuth2-and-OIDC/aws.md
create mode 100644 docs/install/configuration/OAuth2-and-OIDC/azure.md
create mode 100644 docs/install/configuration/OAuth2-and-OIDC/discord.md
create mode 100644 docs/install/configuration/OAuth2-and-OIDC/facebook.md
create mode 100644 docs/install/configuration/OAuth2-and-OIDC/github.md
create mode 100644 docs/install/configuration/OAuth2-and-OIDC/google.md
create mode 100644 docs/install/configuration/OAuth2-and-OIDC/keycloak.md
create mode 100644 docs/install/configuration/ai_endpoints.md
create mode 100644 docs/install/configuration/azure_openai.md
create mode 100644 docs/install/configuration/ollama.md
create mode 100644 docs/src/requirements.txt
create mode 100644 packages/data-provider/specs/actions.spec.ts
create mode 100644 packages/data-provider/specs/azure.spec.ts
create mode 100644 packages/data-provider/specs/filetypes.spec.ts
create mode 100644 packages/data-provider/specs/generate.spec.ts
create mode 100644 packages/data-provider/specs/openapiSpecs.ts
create mode 100644 packages/data-provider/specs/parsers.spec.ts
create mode 100644 packages/data-provider/src/actions.ts
create mode 100644 packages/data-provider/src/azure.ts
create mode 100644 packages/data-provider/src/file-config.ts
create mode 100644 packages/data-provider/src/generate.ts
delete mode 100644 packages/data-provider/src/react-query/assistants.ts
create mode 100644 packages/data-provider/src/types/queries.ts
create mode 100644 packages/data-provider/tsconfig.spec.json
create mode 100644 rag.yml
create mode 100755 utils/docker/docker-build.sh
create mode 100755 utils/docker/docker-push.sh
create mode 100644 utils/docker/test-compose.yml
diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
new file mode 100644
index 00000000000..7f5566fb979
--- /dev/null
+++ b/.devcontainer/Dockerfile
@@ -0,0 +1,5 @@
+FROM node:18-bullseye
+
+RUN useradd -m -s /bin/bash vscode
+RUN mkdir -p /workspaces && chown -R vscode:vscode /workspaces
+WORKDIR /workspaces
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index ebfd2685ee6..a3bb7805501 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -13,5 +13,6 @@
}
},
"postCreateCommand": "",
- "features": { "ghcr.io/devcontainers/features/git:1": {} }
+ "features": { "ghcr.io/devcontainers/features/git:1": {} },
+ "remoteUser": "vscode"
}
diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml
index c67fca63019..277ac84f856 100644
--- a/.devcontainer/docker-compose.yml
+++ b/.devcontainer/docker-compose.yml
@@ -2,7 +2,9 @@ version: "3.8"
services:
app:
- image: node:19-bullseye
+ build:
+ context: ..
+ dockerfile: .devcontainer/Dockerfile
# restart: always
links:
- mongodb
@@ -30,8 +32,8 @@ services:
# Use "forwardPorts" in **devcontainer.json** to forward an app port locally.
# (Adding the "ports" property to this file will not forward from a Codespace.)
- # Uncomment the next line to use a non-root user for all processes - See https://aka.ms/vscode-remote/containers/non-root for details.
- # user: vscode
+ # Use a non-root user for all processes - See https://aka.ms/vscode-remote/containers/non-root for details.
+ user: vscode
# Overrides default command so things don't shut down after the process ends.
command: /bin/sh -c "while sleep 1000; do :; done"
diff --git a/.env.example b/.env.example
index 2e23a09a349..bd212cc7baa 100644
--- a/.env.example
+++ b/.env.example
@@ -1,21 +1,18 @@
-#=============================================================#
-# LibreChat Configuration #
-#=============================================================#
-# Please refer to the reference documentation for assistance #
-# with configuring your LibreChat environment. The guide is #
-# available both online and within your local LibreChat #
-# directory: #
-# Online: https://docs.librechat.ai/install/dotenv.html #
-# Locally: ./docs/install/dotenv.md #
-#=============================================================#
+#=====================================================================#
+# LibreChat Configuration #
+#=====================================================================#
+# Please refer to the reference documentation for assistance #
+# with configuring your LibreChat environment. The guide is #
+# available both online and within your local LibreChat #
+# directory: #
+# Online: https://docs.librechat.ai/install/configuration/dotenv.html #
+# Locally: ./docs/install/configuration/dotenv.md #
+#=====================================================================#
#==================================================#
# Server Configuration #
#==================================================#
-APP_TITLE=LibreChat
-# CUSTOM_FOOTER="My custom footer"
-
HOST=localhost
PORT=3080
@@ -26,6 +23,13 @@ DOMAIN_SERVER=http://localhost:3080
NO_INDEX=true
+#===============#
+# JSON Logging #
+#===============#
+
+# Use when process console logs in cloud deployment like GCP/AWS
+CONSOLE_JSON=false
+
#===============#
# Debug Logging #
#===============#
@@ -40,38 +44,62 @@ DEBUG_CONSOLE=false
# UID=1000
# GID=1000
+#===============#
+# Configuration #
+#===============#
+# Use an absolute path, a relative path, or a URL
+
+# CONFIG_PATH="/alternative/path/to/librechat.yaml"
+
#===================================================#
# Endpoints #
#===================================================#
-# ENDPOINTS=openAI,azureOpenAI,bingAI,chatGPTBrowser,google,gptPlugins,anthropic
+# ENDPOINTS=openAI,assistants,azureOpenAI,bingAI,google,gptPlugins,anthropic
PROXY=
+#===================================#
+# Known Endpoints - librechat.yaml #
+#===================================#
+# https://docs.librechat.ai/install/configuration/ai_endpoints.html
+
+# GROQ_API_KEY=
+# SHUTTLEAI_KEY=
+# OPENROUTER_KEY=
+# MISTRAL_API_KEY=
+# ANYSCALE_API_KEY=
+# FIREWORKS_API_KEY=
+# PERPLEXITY_API_KEY=
+# TOGETHERAI_API_KEY=
+
#============#
# Anthropic #
#============#
ANTHROPIC_API_KEY=user_provided
-ANTHROPIC_MODELS=claude-1,claude-instant-1,claude-2
+# ANTHROPIC_MODELS=claude-3-opus-20240229,claude-3-sonnet-20240229,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_REVERSE_PROXY=
#============#
# Azure #
#============#
-# AZURE_API_KEY=
-AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4
-# AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo
-# PLUGINS_USE_AZURE="true"
-AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE
+# Note: these variables are DEPRECATED
+# Use the `librechat.yaml` configuration for `azureOpenAI` instead
+# You may also continue to use them if you opt out of using the `librechat.yaml` configuration
-# AZURE_OPENAI_API_INSTANCE_NAME=
-# AZURE_OPENAI_API_DEPLOYMENT_NAME=
-# AZURE_OPENAI_API_VERSION=
-# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME=
-# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME=
+# AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo # Deprecated
+# AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4 # Deprecated
+# AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE # Deprecated
+# AZURE_API_KEY= # Deprecated
+# AZURE_OPENAI_API_INSTANCE_NAME= # Deprecated
+# AZURE_OPENAI_API_DEPLOYMENT_NAME= # Deprecated
+# AZURE_OPENAI_API_VERSION= # Deprecated
+# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME= # Deprecated
+# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME= # Deprecated
+# PLUGINS_USE_AZURE="true" # Deprecated
#============#
# BingAI #
@@ -80,14 +108,6 @@ AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE
BINGAI_TOKEN=user_provided
# BINGAI_HOST=https://cn.bing.com
-#============#
-# ChatGPT #
-#============#
-
-CHATGPT_TOKEN=
-CHATGPT_MODELS=text-davinci-002-render-sha
-# CHATGPT_REVERSE_PROXY=
-
#============#
# Google #
#============#
@@ -101,7 +121,7 @@ GOOGLE_KEY=user_provided
#============#
OPENAI_API_KEY=user_provided
-# OPENAI_MODELS=gpt-3.5-turbo-1106,gpt-4-1106-preview,gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,gpt-4,gpt-4-0314,gpt-4-0613
+# OPENAI_MODELS=gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
DEBUG_OPENAI=false
@@ -115,7 +135,15 @@ DEBUG_OPENAI=false
# OPENAI_REVERSE_PROXY=
-# OPENAI_ORGANIZATION=
+# OPENAI_ORGANIZATION=
+
+#====================#
+# Assistants API #
+#====================#
+
+ASSISTANTS_API_KEY=user_provided
+# ASSISTANTS_BASE_URL=
+# ASSISTANTS_MODELS=gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
#============#
# OpenRouter #
@@ -127,7 +155,7 @@ DEBUG_OPENAI=false
# Plugins #
#============#
-# PLUGIN_MODELS=gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,gpt-4,gpt-4-0314,gpt-4-0613
+# PLUGIN_MODELS=gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
DEBUG_PLUGINS=true
@@ -147,20 +175,20 @@ AZURE_AI_SEARCH_SEARCH_OPTION_SELECT=
# DALL·E
#----------------
-# DALLE_API_KEY= # Key for both DALL-E-2 and DALL-E-3
-# DALLE3_API_KEY= # Key for DALL-E-3 only
-# DALLE2_API_KEY= # Key for DALL-E-2 only
-# DALLE3_SYSTEM_PROMPT="Your DALL-E-3 System Prompt here"
-# DALLE2_SYSTEM_PROMPT="Your DALL-E-2 System Prompt here"
-# DALLE_REVERSE_PROXY= # Reverse proxy for DALL-E-2 and DALL-E-3
-# DALLE3_BASEURL= # Base URL for DALL-E-3
-# DALLE2_BASEURL= # Base URL for DALL-E-2
+# DALLE_API_KEY=
+# DALLE3_API_KEY=
+# DALLE2_API_KEY=
+# DALLE3_SYSTEM_PROMPT=
+# DALLE2_SYSTEM_PROMPT=
+# DALLE_REVERSE_PROXY=
+# DALLE3_BASEURL=
+# DALLE2_BASEURL=
# DALL·E (via Azure OpenAI)
# Note: requires some of the variables above to be set
#----------------
-# DALLE3_AZURE_API_VERSION= # Azure OpenAI API version for DALL-E-3
-# DALLE2_AZURE_API_VERSION= # Azure OpenAI API versiion for DALL-E-2
+# DALLE3_AZURE_API_VERSION=
+# DALLE2_AZURE_API_VERSION=
# Google
#-----------------
@@ -175,6 +203,14 @@ SERPAPI_API_KEY=
#-----------------
SD_WEBUI_URL=http://host.docker.internal:7860
+# Tavily
+#-----------------
+TAVILY_API_KEY=
+
+# Traversaal
+#-----------------
+TRAVERSAAL_API_KEY=
+
# WolframAlpha
#-----------------
WOLFRAM_APP_ID=
@@ -202,7 +238,7 @@ MEILI_MASTER_KEY=DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFCt
OPENAI_MODERATION=false
OPENAI_MODERATION_API_KEY=
-# OPENAI_MODERATION_REVERSE_PROXY=not working with some reverse proxys
+# OPENAI_MODERATION_REVERSE_PROXY=
BAN_VIOLATIONS=true
BAN_DURATION=1000 * 60 * 60 * 2
@@ -230,6 +266,8 @@ LIMIT_MESSAGE_USER=false
MESSAGE_USER_MAX=40
MESSAGE_USER_WINDOW=1
+ILLEGAL_MODEL_REQ_SCORE=5
+
#========================#
# Balance #
#========================#
@@ -278,6 +316,9 @@ OPENID_ISSUER=
OPENID_SESSION_SECRET=
OPENID_SCOPE="openid profile email"
OPENID_CALLBACK_URL=/oauth/openid/callback
+OPENID_REQUIRED_ROLE=
+OPENID_REQUIRED_ROLE_TOKEN_KIND=
+OPENID_REQUIRED_ROLE_PARAMETER_PATH=
OPENID_BUTTON_LABEL=
OPENID_IMAGE_URL=
@@ -286,15 +327,15 @@ OPENID_IMAGE_URL=
# Email Password Reset #
#========================#
-EMAIL_SERVICE=
-EMAIL_HOST=
-EMAIL_PORT=25
-EMAIL_ENCRYPTION=
-EMAIL_ENCRYPTION_HOSTNAME=
-EMAIL_ALLOW_SELFSIGNED=
-EMAIL_USERNAME=
-EMAIL_PASSWORD=
-EMAIL_FROM_NAME=
+EMAIL_SERVICE=
+EMAIL_HOST=
+EMAIL_PORT=25
+EMAIL_ENCRYPTION=
+EMAIL_ENCRYPTION_HOSTNAME=
+EMAIL_ALLOW_SELFSIGNED=
+EMAIL_USERNAME=
+EMAIL_PASSWORD=
+EMAIL_FROM_NAME=
EMAIL_FROM=noreply@librechat.ai
#========================#
@@ -308,6 +349,16 @@ FIREBASE_STORAGE_BUCKET=
FIREBASE_MESSAGING_SENDER_ID=
FIREBASE_APP_ID=
+#===================================================#
+# UI #
+#===================================================#
+
+APP_TITLE=LibreChat
+# CUSTOM_FOOTER="My custom footer"
+HELP_AND_FAQ_URL=https://librechat.ai
+
+# SHOW_BIRTHDAY_ICON=true
+
#==================================================#
# Others #
#==================================================#
diff --git a/.eslintrc.js b/.eslintrc.js
index a3d71acd69f..e85e0d768ca 100644
--- a/.eslintrc.js
+++ b/.eslintrc.js
@@ -19,6 +19,7 @@ module.exports = {
'e2e/playwright-report/**/*',
'packages/data-provider/types/**/*',
'packages/data-provider/dist/**/*',
+ 'packages/data-provider/test_bundle/**/*',
'data-node/**/*',
'meili_data/**/*',
'node_modules/**/*',
@@ -131,6 +132,12 @@ module.exports = {
},
],
},
+ {
+ files: ['./packages/data-provider/specs/**/*.ts'],
+ parserOptions: {
+ project: './packages/data-provider/tsconfig.spec.json',
+ },
+ },
],
settings: {
react: {
diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md
index 3f39cc00b3b..cb767cbd7cd 100644
--- a/.github/CODE_OF_CONDUCT.md
+++ b/.github/CODE_OF_CONDUCT.md
@@ -60,7 +60,7 @@ representative at an online or offline event.
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement here on GitHub or
-on the official [Discord Server](https://discord.gg/uDyZ5Tzhct).
+on the official [Discord Server](https://discord.librechat.ai).
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 36618437fab..142f67c953f 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -8,7 +8,7 @@ If the feature you would like to contribute has not already received prior appro
Please note that a pull request involving a feature that has not been reviewed and approved by the project maintainers may be rejected. We appreciate your understanding and cooperation.
-If you would like to discuss the changes you wish to make, join our [Discord community](https://discord.gg/uDyZ5Tzhct), where you can engage with other contributors and seek guidance from the community.
+If you would like to discuss the changes you wish to make, join our [Discord community](https://discord.librechat.ai), where you can engage with other contributors and seek guidance from the community.
## Our Standards
diff --git a/.github/ISSUE_TEMPLATE/BUG-REPORT.yml b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml
index b6b64c3f2de..5c88b9f70dc 100644
--- a/.github/ISSUE_TEMPLATE/BUG-REPORT.yml
+++ b/.github/ISSUE_TEMPLATE/BUG-REPORT.yml
@@ -50,7 +50,7 @@ body:
id: terms
attributes:
label: Code of Conduct
- description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/CODE_OF_CONDUCT.md)
+ description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md)
options:
- label: I agree to follow this project's Code of Conduct
required: true
diff --git a/.github/SECURITY.md b/.github/SECURITY.md
index bd105f2526c..b01e04e0160 100644
--- a/.github/SECURITY.md
+++ b/.github/SECURITY.md
@@ -12,7 +12,7 @@ When reporting a security vulnerability, you have the following options to reach
- **Option 2: GitHub Issues**: You can initiate first contact via GitHub Issues. However, please note that initial contact through GitHub Issues should not include any sensitive details.
-- **Option 3: Discord Server**: You can join our [Discord community](https://discord.gg/5rbRxn4uME) and initiate first contact in the `#issues` channel. However, please ensure that initial contact through Discord does not include any sensitive details.
+- **Option 3: Discord Server**: You can join our [Discord community](https://discord.librechat.ai) and initiate first contact in the `#issues` channel. However, please ensure that initial contact through Discord does not include any sensitive details.
_After the initial contact, we will establish a private communication channel for further discussion._
@@ -39,11 +39,11 @@ Please note that as a security-conscious community, we may not always disclose d
This security policy applies to the following GitHub repository:
-- Repository: [LibreChat](https://github.com/danny-avila/LibreChat)
+- Repository: [LibreChat](https://github.librechat.ai)
## Contact
-If you have any questions or concerns regarding the security of our project, please join our [Discord community](https://discord.gg/NGaa9RPCft) and report them in the appropriate channel. You can also reach out to us by [opening an issue](https://github.com/danny-avila/LibreChat/issues/new) on GitHub. Please note that the response time may vary depending on the nature and severity of the inquiry.
+If you have any questions or concerns regarding the security of our project, please join our [Discord community](https://discord.librechat.ai) and report them in the appropriate channel. You can also reach out to us by [opening an issue](https://github.com/danny-avila/LibreChat/issues/new) on GitHub. Please note that the response time may vary depending on the nature and severity of the inquiry.
## Acknowledgments
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 06d2656bd64..a1542cb76e4 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -15,8 +15,9 @@ Please delete any irrelevant options.
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
- [ ] This change requires a documentation update
-- [ ] Documentation update
- [ ] Translation update
+- [ ] Documentation update
+
## Testing
@@ -26,6 +27,8 @@ Please describe your test process and include instructions so that we can reprod
## Checklist
+Please delete any irrelevant options.
+
- [ ] My code adheres to this project's style guidelines
- [ ] I have performed a self-review of my own code
- [ ] I have commented in any complex areas of my code
@@ -34,3 +37,4 @@ Please describe your test process and include instructions so that we can reprod
- [ ] I have written tests demonstrating that my changes are effective or that my feature works
- [ ] Local unit tests pass with my changes
- [ ] Any changes dependent on mine have been merged and published in downstream modules.
+- [ ] New documents have been locally validated with mkdocs
diff --git a/.github/workflows/backend-review.yml b/.github/workflows/backend-review.yml
index fddb6cdac63..db46653c651 100644
--- a/.github/workflows/backend-review.yml
+++ b/.github/workflows/backend-review.yml
@@ -30,10 +30,28 @@ jobs:
- name: Install Data Provider
run: npm run build:data-provider
+
+ - name: Create empty auth.json file
+ run: |
+ mkdir -p api/data
+ echo '{}' > api/data/auth.json
+
+ - name: Check for Circular dependency in rollup
+ working-directory: ./packages/data-provider
+ run: |
+ output=$(npm run rollup:api)
+ echo "$output"
+ if echo "$output" | grep -q "Circular dependency"; then
+ echo "Error: Circular dependency detected!"
+ exit 1
+ fi
- name: Run unit tests
run: cd api && npm run test:ci
+ - name: Run librechat-data-provider unit tests
+ run: cd packages/data-provider && npm run test:ci
+
- name: Run linters
uses: wearerequired/lint-action@v2
with:
diff --git a/.github/workflows/container.yml b/.github/workflows/container.yml
deleted file mode 100644
index 23c6ad48cc8..00000000000
--- a/.github/workflows/container.yml
+++ /dev/null
@@ -1,83 +0,0 @@
-name: Docker Compose Build on Tag
-
-# The workflow is triggered when a tag is pushed
-on:
- push:
- tags:
- - "*"
-
-jobs:
- build:
- runs-on: ubuntu-latest
-
- steps:
- # Check out the repository
- - name: Checkout
- uses: actions/checkout@v4
-
- # Set up Docker
- - name: Set up Docker
- uses: docker/setup-buildx-action@v3
-
- # Set up QEMU for cross-platform builds
- - name: Set up QEMU
- uses: docker/setup-qemu-action@v3
-
- # Log in to GitHub Container Registry
- - name: Log in to GitHub Container Registry
- uses: docker/login-action@v2
- with:
- registry: ghcr.io
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
-
- # Prepare Docker Build
- - name: Build Docker images
- run: |
- cp .env.example .env
-
- # Tag and push librechat-api
- - name: Docker metadata for librechat-api
- id: meta-librechat-api
- uses: docker/metadata-action@v5
- with:
- images: |
- ghcr.io/${{ github.repository_owner }}/librechat-api
- tags: |
- type=raw,value=latest
- type=semver,pattern={{version}}
- type=semver,pattern={{major}}
- type=semver,pattern={{major}}.{{minor}}
-
- - name: Build and librechat-api
- uses: docker/build-push-action@v5
- with:
- file: Dockerfile.multi
- context: .
- push: true
- tags: ${{ steps.meta-librechat-api.outputs.tags }}
- platforms: linux/amd64,linux/arm64
- target: api-build
-
- # Tag and push librechat
- - name: Docker metadata for librechat
- id: meta-librechat
- uses: docker/metadata-action@v5
- with:
- images: |
- ghcr.io/${{ github.repository_owner }}/librechat
- tags: |
- type=raw,value=latest
- type=semver,pattern={{version}}
- type=semver,pattern={{major}}
- type=semver,pattern={{major}}.{{minor}}
-
- - name: Build and librechat
- uses: docker/build-push-action@v5
- with:
- file: Dockerfile
- context: .
- push: true
- tags: ${{ steps.meta-librechat.outputs.tags }}
- platforms: linux/amd64,linux/arm64
- target: node
diff --git a/.github/workflows/dev-images.yml b/.github/workflows/dev-images.yml
index e0149e05e9c..41d427c6c8b 100644
--- a/.github/workflows/dev-images.yml
+++ b/.github/workflows/dev-images.yml
@@ -2,18 +2,38 @@ name: Docker Dev Images Build
on:
workflow_dispatch:
+ push:
+ branches:
+ - main
+ paths:
+ - 'api/**'
+ - 'client/**'
+ - 'packages/**'
jobs:
build:
runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ include:
+ - target: api-build
+ file: Dockerfile.multi
+ image_name: librechat-dev-api
+ - target: node
+ file: Dockerfile
+ image_name: librechat-dev
steps:
# Check out the repository
- name: Checkout
uses: actions/checkout@v4
- # Set up Docker
- - name: Set up Docker
+ # Set up QEMU
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+
+ # Set up Docker Buildx
+ - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# Log in to GitHub Container Registry
@@ -24,22 +44,29 @@ jobs:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- # Build Docker images
- - name: Build Docker images
+ # Login to Docker Hub
+ - name: Login to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ # Prepare the environment
+ - name: Prepare environment
run: |
cp .env.example .env
- docker build -f Dockerfile.multi --target api-build -t librechat-dev-api .
- docker build -f Dockerfile -t librechat-dev .
- # Tag and push the images to GitHub Container Registry
- - name: Tag and push images
- run: |
- docker tag librechat-dev-api:latest ghcr.io/${{ github.repository_owner }}/librechat-dev-api:${{ github.sha }}
- docker push ghcr.io/${{ github.repository_owner }}/librechat-dev-api:${{ github.sha }}
- docker tag librechat-dev-api:latest ghcr.io/${{ github.repository_owner }}/librechat-dev-api:latest
- docker push ghcr.io/${{ github.repository_owner }}/librechat-dev-api:latest
-
- docker tag librechat-dev:latest ghcr.io/${{ github.repository_owner }}/librechat-dev:${{ github.sha }}
- docker push ghcr.io/${{ github.repository_owner }}/librechat-dev:${{ github.sha }}
- docker tag librechat-dev:latest ghcr.io/${{ github.repository_owner }}/librechat-dev:latest
- docker push ghcr.io/${{ github.repository_owner }}/librechat-dev:latest
+ # Build and push Docker images for each target
+ - name: Build and push Docker images
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ file: ${{ matrix.file }}
+ push: true
+ tags: |
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ github.sha }}
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ github.sha }}
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest
+ platforms: linux/amd64,linux/arm64
+ target: ${{ matrix.target }}
diff --git a/.github/workflows/generate_embeddings.yml b/.github/workflows/generate_embeddings.yml
new file mode 100644
index 00000000000..c514f9c1d6b
--- /dev/null
+++ b/.github/workflows/generate_embeddings.yml
@@ -0,0 +1,20 @@
+name: 'generate_embeddings'
+on:
+ workflow_dispatch:
+ push:
+ branches:
+ - main
+ paths:
+ - 'docs/**'
+
+jobs:
+ generate:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: supabase/embeddings-generator@v0.0.5
+ with:
+ supabase-url: ${{ secrets.SUPABASE_URL }}
+ supabase-service-role-key: ${{ secrets.SUPABASE_SERVICE_ROLE_KEY }}
+ openai-key: ${{ secrets.OPENAI_DOC_EMBEDDINGS_KEY }}
+ docs-root-path: 'docs'
\ No newline at end of file
diff --git a/.github/workflows/latest-images-main.yml b/.github/workflows/latest-images-main.yml
deleted file mode 100644
index 5149cecb0e6..00000000000
--- a/.github/workflows/latest-images-main.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-name: Docker Compose Build on Main Branch
-
-on:
- workflow_dispatch: # This line allows manual triggering
-
-jobs:
- build:
- runs-on: ubuntu-latest
-
- steps:
- # Check out the repository
- - name: Checkout
- uses: actions/checkout@v4
-
- # Set up Docker
- - name: Set up Docker
- uses: docker/setup-buildx-action@v3
-
- # Log in to GitHub Container Registry
- - name: Log in to GitHub Container Registry
- uses: docker/login-action@v2
- with:
- registry: ghcr.io
- username: ${{ github.actor }}
- password: ${{ secrets.GITHUB_TOKEN }}
-
- # Run docker-compose build
- - name: Build Docker images
- run: |
- cp .env.example .env
- docker-compose build
- docker build -f Dockerfile.multi --target api-build -t librechat-api .
-
- # Tag and push the images with the 'latest' tag
- - name: Tag image and push
- run: |
- docker tag librechat:latest ghcr.io/${{ github.repository_owner }}/librechat:latest
- docker push ghcr.io/${{ github.repository_owner }}/librechat:latest
- docker tag librechat-api:latest ghcr.io/${{ github.repository_owner }}/librechat-api:latest
- docker push ghcr.io/${{ github.repository_owner }}/librechat-api:latest
diff --git a/.github/workflows/main-image-workflow.yml b/.github/workflows/main-image-workflow.yml
new file mode 100644
index 00000000000..43c9d957534
--- /dev/null
+++ b/.github/workflows/main-image-workflow.yml
@@ -0,0 +1,69 @@
+name: Docker Compose Build Latest Main Image Tag (Manual Dispatch)
+
+on:
+ workflow_dispatch:
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ include:
+ - target: api-build
+ file: Dockerfile.multi
+ image_name: librechat-api
+ - target: node
+ file: Dockerfile
+ image_name: librechat
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Fetch tags and set the latest tag
+ run: |
+ git fetch --tags
+ echo "LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`)" >> $GITHUB_ENV
+
+ # Set up QEMU
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+
+ # Set up Docker Buildx
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ # Log in to GitHub Container Registry
+ - name: Log in to GitHub Container Registry
+ uses: docker/login-action@v2
+ with:
+ registry: ghcr.io
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
+ # Login to Docker Hub
+ - name: Login to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ # Prepare the environment
+ - name: Prepare environment
+ run: |
+ cp .env.example .env
+
+ # Build and push Docker images for each target
+ - name: Build and push Docker images
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ file: ${{ matrix.file }}
+ push: true
+ tags: |
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ env.LATEST_TAG }}
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ env.LATEST_TAG }}
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest
+ platforms: linux/amd64,linux/arm64
+ target: ${{ matrix.target }}
diff --git a/.github/workflows/tag-images.yml b/.github/workflows/tag-images.yml
new file mode 100644
index 00000000000..e90f43978ab
--- /dev/null
+++ b/.github/workflows/tag-images.yml
@@ -0,0 +1,67 @@
+name: Docker Images Build on Tag
+
+on:
+ push:
+ tags:
+ - '*'
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ include:
+ - target: api-build
+ file: Dockerfile.multi
+ image_name: librechat-api
+ - target: node
+ file: Dockerfile
+ image_name: librechat
+
+ steps:
+ # Check out the repository
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ # Set up QEMU
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+
+ # Set up Docker Buildx
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ # Log in to GitHub Container Registry
+ - name: Log in to GitHub Container Registry
+ uses: docker/login-action@v2
+ with:
+ registry: ghcr.io
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
+ # Login to Docker Hub
+ - name: Login to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ # Prepare the environment
+ - name: Prepare environment
+ run: |
+ cp .env.example .env
+
+ # Build and push Docker images for each target
+ - name: Build and push Docker images
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ file: ${{ matrix.file }}
+ push: true
+ tags: |
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ github.ref_name }}
+ ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ github.ref_name }}
+ ${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest
+ platforms: linux/amd64,linux/arm64
+ target: ${{ matrix.target }}
diff --git a/.gitignore b/.gitignore
index 765de5cb799..c55115988b9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -50,6 +50,7 @@ bower_components/
#config file
librechat.yaml
+librechat.yml
# Environment
.npmrc
@@ -74,6 +75,7 @@ src/style - official.css
config.local.ts
**/storageState.json
junit.xml
+**/.venv/
# docker override file
docker-compose.override.yaml
@@ -88,4 +90,10 @@ auth.json
/packages/ux-shared/
/images
-!client/src/components/Nav/SettingsTabs/Data/
\ No newline at end of file
+!client/src/components/Nav/SettingsTabs/Data/
+
+# User uploads
+uploads/
+
+# owner
+release/
\ No newline at end of file
diff --git a/.husky/pre-commit b/.husky/pre-commit
index af85628072b..67f5b002728 100755
--- a/.husky/pre-commit
+++ b/.husky/pre-commit
@@ -1,4 +1,4 @@
-#!/usr/bin/env sh
+#!/usr/bin/env sh
set -e
. "$(dirname -- "$0")/_/husky.sh"
[ -n "$CI" ] && exit 0
diff --git a/Dockerfile b/Dockerfile
index edc79c2497a..fd087eae39d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,20 +1,35 @@
+# v0.7.0
+
# Base node image
-FROM node:18-alpine AS node
+FROM node:18-alpine3.18 AS node
+
+RUN apk add g++ make py3-pip
+RUN npm install -g node-gyp
+RUN apk --no-cache add curl
-COPY . /app
+RUN mkdir -p /app && chown node:node /app
WORKDIR /app
+USER node
+
+COPY --chown=node:node . .
+
# Allow mounting of these files, which have no default
# values.
RUN touch .env
-# Install call deps - Install curl for health check
-RUN apk --no-cache add curl && \
- npm ci
+RUN npm config set fetch-retry-maxtimeout 600000
+RUN npm config set fetch-retries 5
+RUN npm config set fetch-retry-mintimeout 15000
+RUN npm install --no-audit
# React client build
ENV NODE_OPTIONS="--max-old-space-size=2048"
RUN npm run frontend
+# Create directories for the volumes to inherit
+# the correct permissions
+RUN mkdir -p /app/client/public/images /app/api/logs
+
# Node API setup
EXPOSE 3080
ENV HOST=0.0.0.0
diff --git a/Dockerfile.multi b/Dockerfile.multi
index 0d5ebec5e23..00ed37e3ef8 100644
--- a/Dockerfile.multi
+++ b/Dockerfile.multi
@@ -1,3 +1,5 @@
+# v0.7.0
+
# Build API, Client and Data Provider
FROM node:20-alpine AS base
@@ -11,11 +13,12 @@ RUN npm run build
# React client build
FROM data-provider-build AS client-build
WORKDIR /app/client
-COPY ./client/ ./
+COPY ./client/package*.json ./
# Copy data-provider to client's node_modules
RUN mkdir -p /app/client/node_modules/librechat-data-provider/
RUN cp -R /app/packages/data-provider/* /app/client/node_modules/librechat-data-provider/
RUN npm install
+COPY ./client/ ./
ENV NODE_OPTIONS="--max-old-space-size=2048"
RUN npm run build
@@ -24,6 +27,8 @@ FROM data-provider-build AS api-build
WORKDIR /app/api
COPY api/package*.json ./
COPY api/ ./
+# Copy helper scripts
+COPY config/ ./
# Copy data-provider to API's node_modules
RUN mkdir -p /app/api/node_modules/librechat-data-provider/
RUN cp -R /app/packages/data-provider/* /app/api/node_modules/librechat-data-provider/
diff --git a/README.md b/README.md
index 00cd890b073..901ddbc7c14 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,10 @@
-
+
-
- LibreChat
-
+
@@ -27,42 +27,48 @@
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
# 📃 Features
- - 🖥️ UI matching ChatGPT, including Dark mode, Streaming, and 11-2023 updates
- - 💬 Multimodal Chat:
- - Upload and analyze images with GPT-4 and Gemini Vision 📸
- - More filetypes and Assistants API integration in Active Development 🚧
- - 🌎 Multilingual UI:
- - English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro,
- - Русский, 日本語, Svenska, 한국어, Tiếng Việt, 繁體中文, العربية, Türkçe, Nederlands
- - 🤖 AI model selection: OpenAI API, Azure, BingAI, ChatGPT, Google Vertex AI, Anthropic (Claude), Plugins
- - 💾 Create, Save, & Share Custom Presets
- - 🔄 Edit, Resubmit, and Continue messages with conversation branching
- - 📤 Export conversations as screenshots, markdown, text, json.
- - 🔍 Search all messages/conversations
- - 🔌 Plugins, including web access, image generation with DALL-E-3 and more
- - 👥 Multi-User, Secure Authentication with Moderation and Token spend tools
- - ⚙️ Configure Proxy, Reverse Proxy, Docker, many Deployment options, and completely Open-Source
-[For a thorough review of our features, see our docs here](https://docs.librechat.ai/features/plugins/introduction.html) 📚
+- 🖥️ UI matching ChatGPT, including Dark mode, Streaming, and latest updates
+- 💬 Multimodal Chat:
+ - Upload and analyze images with Claude 3, GPT-4, and Gemini Vision 📸
+ - Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, & Google. 🗃️
+ - Advanced Agents with Files, Code Interpreter, Tools, and API Actions 🔦
+ - Available through the [OpenAI Assistants API](https://platform.openai.com/docs/assistants/overview) 🌤️
+ - Non-OpenAI Agents in Active Development 🚧
+- 🌎 Multilingual UI:
+ - English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro,
+ - Русский, 日本語, Svenska, 한국어, Tiếng Việt, 繁體中文, العربية, Türkçe, Nederlands, עברית
+- 🤖 AI model selection: OpenAI, Azure OpenAI, BingAI, ChatGPT, Google Vertex AI, Anthropic (Claude), Plugins, Assistants API (including Azure Assistants)
+- 💾 Create, Save, & Share Custom Presets
+- 🔄 Edit, Resubmit, and Continue messages with conversation branching
+- 📤 Export conversations as screenshots, markdown, text, json.
+- 🔍 Search all messages/conversations
+- 🔌 Plugins, including web access, image generation with DALL-E-3 and more
+- 👥 Multi-User, Secure Authentication with Moderation and Token spend tools
+- ⚙️ Configure Proxy, Reverse Proxy, Docker, & many Deployment options
+- 📖 Completely Open-Source & Built in Public
+- 🧑🤝🧑 Community-driven development, support, and feedback
+[For a thorough review of our features, see our docs here](https://docs.librechat.ai/features/plugins/introduction.html) 📚
## 🪶 All-In-One AI Conversations with LibreChat
+
LibreChat brings together the future of assistant AIs with the revolutionary technology of OpenAI's ChatGPT. Celebrating the original styling, LibreChat gives you the ability to integrate multiple AI models. It also integrates and enhances original client features such as conversation and message search, prompt templates and plugins.
With LibreChat, you no longer need to opt for ChatGPT Plus and can instead use free or pay-per-call APIs. We welcome contributions, cloning, and forking to enhance the capabilities of this advanced chatbot platform.
-
+
[](https://youtu.be/pNIOs1ovsXw)
@@ -71,11 +77,13 @@ Click on the thumbnail to open the video☝️
---
## 📚 Documentation
+
For more information on how to use our advanced features, install and configure our software, and access our guidelines and tutorials, please check out our documentation at [docs.librechat.ai](https://docs.librechat.ai)
---
-## 📝 Changelog
+## 📝 Changelog
+
Keep up with the latest updates by visiting the releases page - [Releases](https://github.com/danny-avila/LibreChat/releases)
**⚠️ [Breaking Changes](docs/general_info/breaking_changes.md)**
@@ -96,14 +104,15 @@ Please consult the breaking changes before updating.
---
## ✨ Contributions
+
Contributions, suggestions, bug reports and fixes are welcome!
-For new features, components, or extensions, please open an issue and discuss before sending a PR.
+For new features, components, or extensions, please open an issue and discuss before sending a PR.
---
-💖 This project exists in its current state thanks to all the people who contribute
----
+## 💖 This project exists in its current state thanks to all the people who contribute
+
diff --git a/api/app/chatgpt-browser.js b/api/app/chatgpt-browser.js
index 467e67785d3..818661555dc 100644
--- a/api/app/chatgpt-browser.js
+++ b/api/app/chatgpt-browser.js
@@ -1,5 +1,6 @@
require('dotenv').config();
const { KeyvFile } = require('keyv-file');
+const { Constants } = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('../server/services/UserService');
const browserClient = async ({
@@ -48,7 +49,7 @@ const browserClient = async ({
options = { ...options, parentMessageId, conversationId };
}
- if (parentMessageId === '00000000-0000-0000-0000-000000000000') {
+ if (parentMessageId === Constants.NO_PARENT) {
delete options.conversationId;
}
diff --git a/api/app/clients/AnthropicClient.js b/api/app/clients/AnthropicClient.js
index 0441a49334e..6d478defab0 100644
--- a/api/app/clients/AnthropicClient.js
+++ b/api/app/clients/AnthropicClient.js
@@ -1,6 +1,19 @@
const Anthropic = require('@anthropic-ai/sdk');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
-const { getResponseSender, EModelEndpoint } = require('librechat-data-provider');
+const {
+ getResponseSender,
+ EModelEndpoint,
+ validateVisionModel,
+} = require('librechat-data-provider');
+const { encodeAndFormat } = require('~/server/services/Files/images/encode');
+const {
+ titleFunctionPrompt,
+ parseTitleFromPrompt,
+ truncateText,
+ formatMessage,
+ createContextHandlers,
+} = require('./prompts');
+const spendTokens = require('~/models/spendTokens');
const { getModelMaxTokens } = require('~/utils');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
@@ -10,12 +23,20 @@ const AI_PROMPT = '\n\nAssistant:';
const tokenizersCache = {};
+/** Helper function to introduce a delay before retrying */
+function delayBeforeRetry(attempts, baseDelay = 1000) {
+ return new Promise((resolve) => setTimeout(resolve, baseDelay * attempts));
+}
+
class AnthropicClient extends BaseClient {
constructor(apiKey, options = {}) {
super(apiKey, options);
this.apiKey = apiKey || process.env.ANTHROPIC_API_KEY;
this.userLabel = HUMAN_PROMPT;
this.assistantLabel = AI_PROMPT;
+ this.contextStrategy = options.contextStrategy
+ ? options.contextStrategy.toLowerCase()
+ : 'discard';
this.setOptions(options);
}
@@ -47,6 +68,12 @@ class AnthropicClient extends BaseClient {
stop: modelOptions.stop, // no stop method for now
};
+ this.isClaude3 = this.modelOptions.model.includes('claude-3');
+ this.useMessages = this.isClaude3 || !!this.options.attachments;
+
+ this.defaultVisionModel = this.options.visionModel ?? 'claude-3-sonnet-20240229';
+ this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));
+
this.maxContextTokens =
getModelMaxTokens(this.modelOptions.model, EModelEndpoint.anthropic) ?? 100000;
this.maxResponseTokens = this.modelOptions.maxOutputTokens || 1500;
@@ -87,7 +114,12 @@ class AnthropicClient extends BaseClient {
return this;
}
+ /**
+ * Get the initialized Anthropic client.
+ * @returns {Anthropic} The Anthropic client instance.
+ */
getClient() {
+ /** @type {Anthropic.default.RequestOptions} */
const options = {
apiKey: this.apiKey,
};
@@ -99,6 +131,75 @@ class AnthropicClient extends BaseClient {
return new Anthropic(options);
}
+ getTokenCountForResponse(response) {
+ return this.getTokenCountForMessage({
+ role: 'assistant',
+ content: response.text,
+ });
+ }
+
+ /**
+ *
+ * Checks if the model is a vision model based on request attachments and sets the appropriate options:
+   * - Sets `this.modelOptions.model` to the default vision model (`this.defaultVisionModel`) if the request is a vision request.
+ * - Sets `this.isVisionModel` to `true` if vision request.
+   * - Leaves `this.modelOptions.stop` unchanged.
+ * @param {MongoFile[]} attachments
+ */
+ checkVisionRequest(attachments) {
+ const availableModels = this.options.modelsConfig?.[EModelEndpoint.anthropic];
+ this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
+
+ const visionModelAvailable = availableModels?.includes(this.defaultVisionModel);
+ if (
+ attachments &&
+ attachments.some((file) => file?.type && file?.type?.includes('image')) &&
+ visionModelAvailable &&
+ !this.isVisionModel
+ ) {
+ this.modelOptions.model = this.defaultVisionModel;
+ this.isVisionModel = true;
+ }
+ }
+
+ /**
+ * Calculate the token cost in tokens for an image based on its dimensions and detail level.
+ *
+ * For reference, see: https://docs.anthropic.com/claude/docs/vision#image-costs
+ *
+ * @param {Object} image - The image object.
+ * @param {number} image.width - The width of the image.
+ * @param {number} image.height - The height of the image.
+ * @returns {number} The calculated token cost measured by tokens.
+ *
+ */
+ calculateImageTokenCost({ width, height }) {
+ return Math.ceil((width * height) / 750);
+ }
+
+ async addImageURLs(message, attachments) {
+ const { files, image_urls } = await encodeAndFormat(
+ this.options.req,
+ attachments,
+ EModelEndpoint.anthropic,
+ );
+ message.image_urls = image_urls.length ? image_urls : undefined;
+ return files;
+ }
+
+ async recordTokenUsage({ promptTokens, completionTokens, model, context = 'message' }) {
+ await spendTokens(
+ {
+ context,
+ user: this.user,
+ conversationId: this.conversationId,
+ model: model ?? this.modelOptions.model,
+ endpointTokenConfig: this.options.endpointTokenConfig,
+ },
+ { promptTokens, completionTokens },
+ );
+ }
+
async buildMessages(messages, parentMessageId) {
const orderedMessages = this.constructor.getMessagesForConversation({
messages,
@@ -107,28 +208,145 @@ class AnthropicClient extends BaseClient {
logger.debug('[AnthropicClient] orderedMessages', { orderedMessages, parentMessageId });
- const formattedMessages = orderedMessages.map((message) => ({
- author: message.isCreatedByUser ? this.userLabel : this.assistantLabel,
- content: message?.content ?? message.text,
- }));
+ if (this.options.attachments) {
+ const attachments = await this.options.attachments;
+ const images = attachments.filter((file) => file.type.includes('image'));
+
+ if (images.length && !this.isVisionModel) {
+ throw new Error('Images are only supported with the Claude 3 family of models');
+ }
+
+ const latestMessage = orderedMessages[orderedMessages.length - 1];
+
+ if (this.message_file_map) {
+ this.message_file_map[latestMessage.messageId] = attachments;
+ } else {
+ this.message_file_map = {
+ [latestMessage.messageId]: attachments,
+ };
+ }
+
+ const files = await this.addImageURLs(latestMessage, attachments);
+
+ this.options.attachments = files;
+ }
+
+ if (this.message_file_map) {
+ this.contextHandlers = createContextHandlers(
+ this.options.req,
+ orderedMessages[orderedMessages.length - 1].text,
+ );
+ }
+
+ const formattedMessages = orderedMessages.map((message, i) => {
+ const formattedMessage = this.useMessages
+ ? formatMessage({
+ message,
+ endpoint: EModelEndpoint.anthropic,
+ })
+ : {
+ author: message.isCreatedByUser ? this.userLabel : this.assistantLabel,
+ content: message?.content ?? message.text,
+ };
+
+ const needsTokenCount = this.contextStrategy && !orderedMessages[i].tokenCount;
+ /* If tokens were never counted, or, is a Vision request and the message has files, count again */
+ if (needsTokenCount || (this.isVisionModel && (message.image_urls || message.files))) {
+ orderedMessages[i].tokenCount = this.getTokenCountForMessage(formattedMessage);
+ }
+
+ /* If message has files, calculate image token cost */
+ if (this.message_file_map && this.message_file_map[message.messageId]) {
+ const attachments = this.message_file_map[message.messageId];
+ for (const file of attachments) {
+ if (file.embedded) {
+ this.contextHandlers?.processFile(file);
+ continue;
+ }
+
+ orderedMessages[i].tokenCount += this.calculateImageTokenCost({
+ width: file.width,
+ height: file.height,
+ });
+ }
+ }
+
+ formattedMessage.tokenCount = orderedMessages[i].tokenCount;
+ return formattedMessage;
+ });
+
+ if (this.contextHandlers) {
+ this.augmentedPrompt = await this.contextHandlers.createContext();
+ this.options.promptPrefix = this.augmentedPrompt + (this.options.promptPrefix ?? '');
+ }
+
+ let { context: messagesInWindow, remainingContextTokens } =
+ await this.getMessagesWithinTokenLimit(formattedMessages);
+
+ const tokenCountMap = orderedMessages
+ .slice(orderedMessages.length - messagesInWindow.length)
+ .reduce((map, message, index) => {
+ const { messageId } = message;
+ if (!messageId) {
+ return map;
+ }
+
+ map[messageId] = orderedMessages[index].tokenCount;
+ return map;
+ }, {});
+
+ logger.debug('[AnthropicClient]', {
+ messagesInWindow: messagesInWindow.length,
+ remainingContextTokens,
+ });
let lastAuthor = '';
let groupedMessages = [];
- for (let message of formattedMessages) {
+ for (let i = 0; i < messagesInWindow.length; i++) {
+ const message = messagesInWindow[i];
+ const author = message.role ?? message.author;
// If last author is not same as current author, add to new group
- if (lastAuthor !== message.author) {
- groupedMessages.push({
- author: message.author,
+ if (lastAuthor !== author) {
+ const newMessage = {
content: [message.content],
- });
- lastAuthor = message.author;
+ };
+
+ if (message.role) {
+ newMessage.role = message.role;
+ } else {
+ newMessage.author = message.author;
+ }
+
+ groupedMessages.push(newMessage);
+ lastAuthor = author;
// If same author, append content to the last group
} else {
groupedMessages[groupedMessages.length - 1].content.push(message.content);
}
}
+ groupedMessages = groupedMessages.map((msg, i) => {
+ const isLast = i === groupedMessages.length - 1;
+ if (msg.content.length === 1) {
+ const content = msg.content[0];
+ return {
+ ...msg,
+ // reason: final assistant content cannot end with trailing whitespace
+ content:
+ isLast && this.useMessages && msg.role === 'assistant' && typeof content === 'string'
+ ? content?.trim()
+ : content,
+ };
+ }
+
+ if (!this.useMessages && msg.tokenCount) {
+ delete msg.tokenCount;
+ }
+
+ return msg;
+ });
+
let identityPrefix = '';
if (this.options.userLabel) {
identityPrefix = `\nHuman's name: ${this.options.userLabel}`;
@@ -154,9 +372,10 @@ class AnthropicClient extends BaseClient {
// Prompt AI to respond, empty if last message was from AI
let isEdited = lastAuthor === this.assistantLabel;
const promptSuffix = isEdited ? '' : `${promptPrefix}${this.assistantLabel}\n`;
- let currentTokenCount = isEdited
- ? this.getTokenCount(promptPrefix)
- : this.getTokenCount(promptSuffix);
+ let currentTokenCount =
+ isEdited || this.useMessages
+ ? this.getTokenCount(promptPrefix)
+ : this.getTokenCount(promptSuffix);
let promptBody = '';
const maxTokenCount = this.maxPromptTokens;
@@ -224,7 +443,69 @@ class AnthropicClient extends BaseClient {
return true;
};
- await buildPromptBody();
+ const messagesPayload = [];
+ const buildMessagesPayload = async () => {
+ let canContinue = true;
+
+ if (promptPrefix) {
+ this.systemMessage = promptPrefix;
+ }
+
+ while (currentTokenCount < maxTokenCount && groupedMessages.length > 0 && canContinue) {
+ const message = groupedMessages.pop();
+
+ let tokenCountForMessage = message.tokenCount ?? this.getTokenCountForMessage(message);
+
+ const newTokenCount = currentTokenCount + tokenCountForMessage;
+ const exceededMaxCount = newTokenCount > maxTokenCount;
+
+ if (exceededMaxCount && messagesPayload.length === 0) {
+ throw new Error(
+ `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
+ );
+ } else if (exceededMaxCount) {
+ canContinue = false;
+ break;
+ }
+
+ delete message.tokenCount;
+ messagesPayload.unshift(message);
+ currentTokenCount = newTokenCount;
+
+ // Switch off isEdited after using it once
+ if (isEdited && message.role === 'assistant') {
+ isEdited = false;
+ }
+
+ // Wait for next tick to avoid blocking the event loop
+ await new Promise((resolve) => setImmediate(resolve));
+ }
+ };
+
+ const processTokens = () => {
+ // Add 2 tokens for metadata after all messages have been counted.
+ currentTokenCount += 2;
+
+ // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
+ this.modelOptions.maxOutputTokens = Math.min(
+ this.maxContextTokens - currentTokenCount,
+ this.maxResponseTokens,
+ );
+ };
+
+ if (this.modelOptions.model.startsWith('claude-3')) {
+ await buildMessagesPayload();
+ processTokens();
+ return {
+ prompt: messagesPayload,
+ context: messagesInWindow,
+ promptTokens: currentTokenCount,
+ tokenCountMap,
+ };
+ } else {
+ await buildPromptBody();
+ processTokens();
+ }
if (nextMessage.remove) {
promptBody = promptBody.replace(nextMessage.messageString, '');
@@ -234,22 +515,26 @@ class AnthropicClient extends BaseClient {
let prompt = `${promptBody}${promptSuffix}`;
- // Add 2 tokens for metadata after all messages have been counted.
- currentTokenCount += 2;
-
- // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
- this.modelOptions.maxOutputTokens = Math.min(
- this.maxContextTokens - currentTokenCount,
- this.maxResponseTokens,
- );
-
- return { prompt, context };
+ return { prompt, context, promptTokens: currentTokenCount, tokenCountMap };
}
getCompletion() {
logger.debug('AnthropicClient doesn\'t use getCompletion (all handled in sendCompletion)');
}
+ /**
+ * Creates a message or completion response using the Anthropic client.
+ * @param {Anthropic} client - The Anthropic client instance.
+ * @param {Anthropic.default.MessageCreateParams | Anthropic.default.CompletionCreateParams} options - The options for the message or completion.
+ * @param {boolean} useMessages - Whether to use messages or completions. Defaults to `this.useMessages`.
+ * @returns {Promise} The response from the Anthropic client.
+ */
+ async createResponse(client, options, useMessages) {
+ return useMessages ?? this.useMessages
+ ? await client.messages.create(options)
+ : await client.completions.create(options);
+ }
+
async sendCompletion(payload, { onProgress, abortController }) {
if (!abortController) {
abortController = new AbortController();
@@ -279,36 +564,88 @@ class AnthropicClient extends BaseClient {
topP: top_p,
topK: top_k,
} = this.modelOptions;
+
const requestOptions = {
- prompt: payload,
model,
stream: stream || true,
- max_tokens_to_sample: maxOutputTokens || 1500,
stop_sequences,
temperature,
metadata,
top_p,
top_k,
};
+
+ if (this.useMessages) {
+ requestOptions.messages = payload;
+ requestOptions.max_tokens = maxOutputTokens || 1500;
+ } else {
+ requestOptions.prompt = payload;
+ requestOptions.max_tokens_to_sample = maxOutputTokens || 1500;
+ }
+
+ if (this.systemMessage) {
+ requestOptions.system = this.systemMessage;
+ }
+
logger.debug('[AnthropicClient]', { ...requestOptions });
- const response = await client.completions.create(requestOptions);
- signal.addEventListener('abort', () => {
- logger.debug('[AnthropicClient] message aborted!');
- response.controller.abort();
- });
+ const handleChunk = (currentChunk) => {
+ if (currentChunk) {
+ text += currentChunk;
+ onProgress(currentChunk);
+ }
+ };
+
+ const maxRetries = 3;
+ async function processResponse() {
+ let attempts = 0;
+
+ while (attempts < maxRetries) {
+ let response;
+ try {
+ response = await this.createResponse(client, requestOptions);
+
+ signal.addEventListener('abort', () => {
+ logger.debug('[AnthropicClient] message aborted!');
+ if (response.controller?.abort) {
+ response.controller.abort();
+ }
+ });
+
+ for await (const completion of response) {
+ // Handle each completion as before
+ if (completion?.delta?.text) {
+ handleChunk(completion.delta.text);
+ } else if (completion.completion) {
+ handleChunk(completion.completion);
+ }
+ }
- for await (const completion of response) {
- // Uncomment to debug message stream
- // logger.debug(completion);
- text += completion.completion;
- onProgress(completion.completion);
+ // Successful processing, exit loop
+ break;
+ } catch (error) {
+ attempts += 1;
+ logger.warn(
+ `User: ${this.user} | Anthropic Request ${attempts} failed: ${error.message}`,
+ );
+
+ if (attempts < maxRetries) {
+ await delayBeforeRetry(attempts, 350);
+ } else {
+ throw new Error(`Operation failed after ${maxRetries} attempts: ${error.message}`);
+ }
+ } finally {
+ signal.removeEventListener('abort', () => {
+ logger.debug('[AnthropicClient] message aborted!');
+ if (response.controller?.abort) {
+ response.controller.abort();
+ }
+ });
+ }
+ }
}
- signal.removeEventListener('abort', () => {
- logger.debug('[AnthropicClient] message aborted!');
- response.controller.abort();
- });
+ await processResponse.bind(this)();
return text.trim();
}
@@ -317,6 +654,7 @@ class AnthropicClient extends BaseClient {
return {
promptPrefix: this.options.promptPrefix,
modelLabel: this.options.modelLabel,
+ resendFiles: this.options.resendFiles,
...this.modelOptions,
};
}
@@ -342,6 +680,78 @@ class AnthropicClient extends BaseClient {
getTokenCount(text) {
return this.gptEncoder.encode(text, 'all').length;
}
+
+ /**
+ * Generates a concise title for a conversation based on the user's input text and response.
+ * Involves sending a chat completion request with specific instructions for title generation.
+ *
+   * This function capitalizes on [Anthropic's function calling training](https://docs.anthropic.com/claude/docs/functions-external-tools).
+ *
+ * @param {Object} params - The parameters for the conversation title generation.
+ * @param {string} params.text - The user's input.
+ * @param {string} [params.responseText=''] - The AI's immediate response to the user.
+ *
+ * @returns {Promise} A promise that resolves to the generated conversation title.
+ * In case of failure, it will return the default title, "New Chat".
+ */
+ async titleConvo({ text, responseText = '' }) {
+ let title = 'New Chat';
+ const convo = `
+ ${truncateText(text)}
+
+
+ ${JSON.stringify(truncateText(responseText))}
+ `;
+
+ const { ANTHROPIC_TITLE_MODEL } = process.env ?? {};
+ const model = this.options.titleModel ?? ANTHROPIC_TITLE_MODEL ?? 'claude-3-haiku-20240307';
+ const system = titleFunctionPrompt;
+
+ const titleChatCompletion = async () => {
+ const content = `
+ ${convo}
+
+
+ Please generate a title for this conversation.`;
+
+ const titleMessage = { role: 'user', content };
+ const requestOptions = {
+ model,
+ temperature: 0.3,
+ max_tokens: 1024,
+ system,
+ stop_sequences: ['\n\nHuman:', '\n\nAssistant', ''],
+ messages: [titleMessage],
+ };
+
+ try {
+ const response = await this.createResponse(this.getClient(), requestOptions, true);
+ let promptTokens = response?.usage?.input_tokens;
+ let completionTokens = response?.usage?.output_tokens;
+ if (!promptTokens) {
+ promptTokens = this.getTokenCountForMessage(titleMessage);
+ promptTokens += this.getTokenCountForMessage({ role: 'system', content: system });
+ }
+ if (!completionTokens) {
+ completionTokens = this.getTokenCountForMessage(response.content[0]);
+ }
+ await this.recordTokenUsage({
+ model,
+ promptTokens,
+ completionTokens,
+ context: 'title',
+ });
+ const text = response.content[0].text;
+ title = parseTitleFromPrompt(text);
+ } catch (e) {
+ logger.error('[AnthropicClient] There was an issue generating the title', e);
+ }
+ };
+
+ await titleChatCompletion();
+ logger.debug('[AnthropicClient] Convo Title: ' + title);
+ return title;
+ }
}
module.exports = AnthropicClient;
diff --git a/api/app/clients/BaseClient.js b/api/app/clients/BaseClient.js
index aa39084b9fa..f7ed3b9cf18 100644
--- a/api/app/clients/BaseClient.js
+++ b/api/app/clients/BaseClient.js
@@ -1,8 +1,9 @@
const crypto = require('crypto');
-const { supportsBalanceCheck } = require('librechat-data-provider');
+const { supportsBalanceCheck, Constants } = require('librechat-data-provider');
const { getConvo, getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
const { addSpaceIfNeeded, isEnabled } = require('~/server/utils');
const checkBalance = require('~/models/checkBalance');
+const { getFiles } = require('~/models/File');
const TextStream = require('./TextStream');
const { logger } = require('~/config');
@@ -22,7 +23,7 @@ class BaseClient {
throw new Error('Method \'setOptions\' must be implemented.');
}
- getCompletion() {
+ async getCompletion() {
throw new Error('Method \'getCompletion\' must be implemented.');
}
@@ -46,10 +47,6 @@ class BaseClient {
logger.debug('`[BaseClient] recordTokenUsage` not implemented.', response);
}
- async addPreviousAttachments(messages) {
- return messages;
- }
-
async recordTokenUsage({ promptTokens, completionTokens }) {
logger.debug('`[BaseClient] recordTokenUsage` not implemented.', {
promptTokens,
@@ -77,7 +74,7 @@ class BaseClient {
const saveOptions = this.getSaveOptions();
this.abortController = opts.abortController ?? new AbortController();
const conversationId = opts.conversationId ?? crypto.randomUUID();
- const parentMessageId = opts.parentMessageId ?? '00000000-0000-0000-0000-000000000000';
+ const parentMessageId = opts.parentMessageId ?? Constants.NO_PARENT;
const userMessageId = opts.overrideParentMessageId ?? crypto.randomUUID();
let responseMessageId = opts.responseMessageId ?? crypto.randomUUID();
let head = isEdited ? responseMessageId : parentMessageId;
@@ -428,7 +425,10 @@ class BaseClient {
await this.saveMessageToDatabase(userMessage, saveOptions, user);
}
- if (isEnabled(process.env.CHECK_BALANCE) && supportsBalanceCheck[this.options.endpoint]) {
+ if (
+ isEnabled(process.env.CHECK_BALANCE) &&
+ supportsBalanceCheck[this.options.endpointType ?? this.options.endpoint]
+ ) {
await checkBalance({
req: this.options.req,
res: this.options.res,
@@ -438,11 +438,14 @@ class BaseClient {
amount: promptTokens,
model: this.modelOptions.model,
endpoint: this.options.endpoint,
+ endpointTokenConfig: this.options.endpointTokenConfig,
},
});
}
const completion = await this.sendCompletion(payload, opts);
+ this.abortController.requestCompleted = true;
+
const responseMessage = {
messageId: responseMessageId,
conversationId,
@@ -453,6 +456,7 @@ class BaseClient {
sender: this.sender,
text: addSpaceIfNeeded(generation) + completion,
promptTokens,
+ ...(this.metadata ?? {}),
};
if (
@@ -548,7 +552,7 @@ class BaseClient {
*
* Each message object should have an 'id' or 'messageId' property and may have a 'parentMessageId' property.
* The 'parentMessageId' is the ID of the message that the current message is a reply to.
- * If 'parentMessageId' is not present, null, or is '00000000-0000-0000-0000-000000000000',
+ * If 'parentMessageId' is not present, null, or is Constants.NO_PARENT,
* the message is considered a root message.
*
* @param {Object} options - The options for the function.
@@ -603,9 +607,7 @@ class BaseClient {
}
currentMessageId =
- message.parentMessageId === '00000000-0000-0000-0000-000000000000'
- ? null
- : message.parentMessageId;
+ message.parentMessageId === Constants.NO_PARENT ? null : message.parentMessageId;
}
orderedMessages.reverse();
@@ -679,6 +681,54 @@ class BaseClient {
return await this.sendCompletion(payload, opts);
}
+
+ /**
+ *
+ * @param {TMessage[]} _messages
+ * @returns {Promise}
+ */
+ async addPreviousAttachments(_messages) {
+ if (!this.options.resendFiles) {
+ return _messages;
+ }
+
+ /**
+ *
+ * @param {TMessage} message
+ */
+ const processMessage = async (message) => {
+ if (!this.message_file_map) {
+ /** @type {Record */
+ this.message_file_map = {};
+ }
+
+ const fileIds = message.files.map((file) => file.file_id);
+ const files = await getFiles({
+ file_id: { $in: fileIds },
+ });
+
+ await this.addImageURLs(message, files);
+
+ this.message_file_map[message.messageId] = files;
+ return message;
+ };
+
+ const promises = [];
+
+ for (const message of _messages) {
+ if (!message.files) {
+ promises.push(message);
+ continue;
+ }
+
+ promises.push(processMessage(message));
+ }
+
+ const messages = await Promise.all(promises);
+
+ this.checkVisionRequest(Object.values(this.message_file_map ?? {}).flat());
+ return messages;
+ }
}
module.exports = BaseClient;
diff --git a/api/app/clients/ChatGPTClient.js b/api/app/clients/ChatGPTClient.js
index c1ae54fdf08..d218849513a 100644
--- a/api/app/clients/ChatGPTClient.js
+++ b/api/app/clients/ChatGPTClient.js
@@ -1,9 +1,19 @@
-const crypto = require('crypto');
const Keyv = require('keyv');
+const crypto = require('crypto');
+const {
+ EModelEndpoint,
+ resolveHeaders,
+ CohereConstants,
+ mapModelToAzureConfig,
+} = require('librechat-data-provider');
+const { CohereClient } = require('cohere-ai');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
+const { createCoherePayload } = require('./llm');
const { Agent, ProxyAgent } = require('undici');
const BaseClient = require('./BaseClient');
+const { logger } = require('~/config');
+const { extractBaseURL, constructAzureURL, genAzureChatCompletion } = require('~/utils');
const CHATGPT_MODEL = 'gpt-3.5-turbo';
const tokenizersCache = {};
@@ -140,11 +150,13 @@ class ChatGPTClient extends BaseClient {
return tokenizer;
}
- async getCompletion(input, onProgress, abortController = null) {
+ /** @type {getCompletion} */
+ async getCompletion(input, onProgress, onTokenProgress, abortController = null) {
if (!abortController) {
abortController = new AbortController();
}
- const modelOptions = { ...this.modelOptions };
+
+ let modelOptions = { ...this.modelOptions };
if (typeof onProgress === 'function') {
modelOptions.stream = true;
}
@@ -159,56 +171,176 @@ class ChatGPTClient extends BaseClient {
}
const { debug } = this.options;
- const url = this.completionsUrl;
+ let baseURL = this.completionsUrl;
if (debug) {
console.debug();
- console.debug(url);
+ console.debug(baseURL);
console.debug(modelOptions);
console.debug();
}
- if (this.azure || this.options.azure) {
- // Azure does not accept `model` in the body, so we need to remove it.
- delete modelOptions.model;
- }
-
const opts = {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
- body: JSON.stringify(modelOptions),
dispatcher: new Agent({
bodyTimeout: 0,
headersTimeout: 0,
}),
};
- if (this.apiKey && this.options.azure) {
- opts.headers['api-key'] = this.apiKey;
+ if (this.isVisionModel) {
+ modelOptions.max_tokens = 4000;
+ }
+
+ /** @type {TAzureConfig | undefined} */
+ const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
+
+ const isAzure = this.azure || this.options.azure;
+ if (
+ (isAzure && this.isVisionModel && azureConfig) ||
+ (azureConfig && this.isVisionModel && this.options.endpoint === EModelEndpoint.azureOpenAI)
+ ) {
+ const { modelGroupMap, groupMap } = azureConfig;
+ const {
+ azureOptions,
+ baseURL,
+ headers = {},
+ serverless,
+ } = mapModelToAzureConfig({
+ modelName: modelOptions.model,
+ modelGroupMap,
+ groupMap,
+ });
+ opts.headers = resolveHeaders(headers);
+ this.langchainProxy = extractBaseURL(baseURL);
+ this.apiKey = azureOptions.azureOpenAIApiKey;
+
+ const groupName = modelGroupMap[modelOptions.model].group;
+ this.options.addParams = azureConfig.groupMap[groupName].addParams;
+ this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
+ // Note: `forcePrompt` not re-assigned as only chat models are vision models
+
+ this.azure = !serverless && azureOptions;
+ this.azureEndpoint =
+ !serverless && genAzureChatCompletion(this.azure, modelOptions.model, this);
+ }
+
+ if (this.options.headers) {
+ opts.headers = { ...opts.headers, ...this.options.headers };
+ }
+
+ if (isAzure) {
+ // Azure does not accept `model` in the body, so we need to remove it.
+ delete modelOptions.model;
+
+ baseURL = this.langchainProxy
+ ? constructAzureURL({
+ baseURL: this.langchainProxy,
+ azureOptions: this.azure,
+ })
+ : this.azureEndpoint.split(/(? msg.role === 'system');
+
+ if (systemMessageIndex > 0) {
+ const [systemMessage] = messages.splice(systemMessageIndex, 1);
+ messages.unshift(systemMessage);
+ }
+
+ modelOptions.messages = messages;
+
+ if (messages.length === 1 && messages[0].role === 'system') {
+ modelOptions.messages[0].role = 'user';
+ }
+ }
+
+ if (this.options.addParams && typeof this.options.addParams === 'object') {
+ modelOptions = {
+ ...modelOptions,
+ ...this.options.addParams,
+ };
+ logger.debug('[ChatGPTClient] chatCompletion: added params', {
+ addParams: this.options.addParams,
+ modelOptions,
+ });
+ }
+
+ if (this.options.dropParams && Array.isArray(this.options.dropParams)) {
+ this.options.dropParams.forEach((param) => {
+ delete modelOptions[param];
+ });
+ logger.debug('[ChatGPTClient] chatCompletion: dropped params', {
+ dropParams: this.options.dropParams,
+ modelOptions,
+ });
+ }
+
+ if (baseURL.startsWith(CohereConstants.API_URL)) {
+ const payload = createCoherePayload({ modelOptions });
+ return await this.cohereChatCompletion({ payload, onTokenProgress });
+ }
+
+ if (baseURL.includes('v1') && !baseURL.includes('/completions') && !this.isChatCompletion) {
+ baseURL = baseURL.split('v1')[0] + 'v1/completions';
+ } else if (
+ baseURL.includes('v1') &&
+ !baseURL.includes('/chat/completions') &&
+ this.isChatCompletion
+ ) {
+ baseURL = baseURL.split('v1')[0] + 'v1/chat/completions';
+ }
+
+ const BASE_URL = new URL(baseURL);
+ if (opts.defaultQuery) {
+ Object.entries(opts.defaultQuery).forEach(([key, value]) => {
+ BASE_URL.searchParams.append(key, value);
+ });
+ delete opts.defaultQuery;
+ }
+
+ const completionsURL = BASE_URL.toString();
+ opts.body = JSON.stringify(modelOptions);
+
if (modelOptions.stream) {
// eslint-disable-next-line no-async-promise-executor
return new Promise(async (resolve, reject) => {
try {
let done = false;
- await fetchEventSource(url, {
+ await fetchEventSource(completionsURL, {
...opts,
signal: abortController.signal,
async onopen(response) {
@@ -236,7 +368,6 @@ class ChatGPTClient extends BaseClient {
// workaround for private API not sending [DONE] event
if (!done) {
onProgress('[DONE]');
- abortController.abort();
resolve();
}
},
@@ -249,14 +380,13 @@ class ChatGPTClient extends BaseClient {
},
onmessage(message) {
if (debug) {
- // console.debug(message);
+ console.debug(message);
}
if (!message.data || message.event === 'ping') {
return;
}
if (message.data === '[DONE]') {
onProgress('[DONE]');
- abortController.abort();
resolve();
done = true;
return;
@@ -269,7 +399,7 @@ class ChatGPTClient extends BaseClient {
}
});
}
- const response = await fetch(url, {
+ const response = await fetch(completionsURL, {
...opts,
signal: abortController.signal,
});
@@ -287,6 +417,35 @@ class ChatGPTClient extends BaseClient {
return response.json();
}
+ /** @type {cohereChatCompletion} */
+ async cohereChatCompletion({ payload, onTokenProgress }) {
+ const cohere = new CohereClient({
+ token: this.apiKey,
+ environment: this.completionsUrl,
+ });
+
+ if (!payload.stream) {
+ const chatResponse = await cohere.chat(payload);
+ return chatResponse.text;
+ }
+
+ const chatStream = await cohere.chatStream(payload);
+ let reply = '';
+ for await (const message of chatStream) {
+ if (!message) {
+ continue;
+ }
+
+ if (message.eventType === 'text-generation' && message.text) {
+ onTokenProgress(message.text);
+ } else if (message.eventType === 'stream-end' && message.response) {
+ reply = message.response.text;
+ }
+ }
+
+ return reply;
+ }
+
async generateTitle(userMessage, botMessage) {
const instructionsPayload = {
role: 'system',
diff --git a/api/app/clients/GoogleClient.js b/api/app/clients/GoogleClient.js
index 950cc8d1116..c5edcb275a8 100644
--- a/api/app/clients/GoogleClient.js
+++ b/api/app/clients/GoogleClient.js
@@ -4,16 +4,17 @@ const { GoogleVertexAI } = require('langchain/llms/googlevertexai');
const { ChatGoogleGenerativeAI } = require('@langchain/google-genai');
const { ChatGoogleVertexAI } = require('langchain/chat_models/googlevertexai');
const { AIMessage, HumanMessage, SystemMessage } = require('langchain/schema');
-const { encodeAndFormat, validateVisionModel } = require('~/server/services/Files/images');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
+ validateVisionModel,
getResponseSender,
- EModelEndpoint,
endpointSettings,
+ EModelEndpoint,
AuthKeys,
} = require('librechat-data-provider');
+const { encodeAndFormat } = require('~/server/services/Files/images');
+const { formatMessage, createContextHandlers } = require('./prompts');
const { getModelMaxTokens } = require('~/utils');
-const { formatMessage } = require('./prompts');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
@@ -123,18 +124,11 @@ class GoogleClient extends BaseClient {
// stop: modelOptions.stop // no stop method for now
};
- if (this.options.attachments) {
- this.modelOptions.model = 'gemini-pro-vision';
- }
+ this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));
// TODO: as of 12/14/23, only gemini models are "Generative AI" models provided by Google
this.isGenerativeModel = this.modelOptions.model.includes('gemini');
- this.isVisionModel = validateVisionModel(this.modelOptions.model);
const { isGenerativeModel } = this;
- if (this.isVisionModel && !this.options.attachments) {
- this.modelOptions.model = 'gemini-pro';
- this.isVisionModel = false;
- }
this.isChatModel = !isGenerativeModel && this.modelOptions.model.includes('chat');
const { isChatModel } = this;
this.isTextModel =
@@ -219,6 +213,33 @@ class GoogleClient extends BaseClient {
return this;
}
+ /**
+ *
+ * Checks if the model is a vision model based on request attachments and sets the appropriate options:
+ * @param {MongoFile[]} attachments
+ */
+ checkVisionRequest(attachments) {
+ /* Validation vision request */
+ this.defaultVisionModel = this.options.visionModel ?? 'gemini-pro-vision';
+ const availableModels = this.options.modelsConfig?.[EModelEndpoint.google];
+ this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
+
+ if (
+ attachments &&
+ attachments.some((file) => file?.type && file?.type?.includes('image')) &&
+ availableModels?.includes(this.defaultVisionModel) &&
+ !this.isVisionModel
+ ) {
+ this.modelOptions.model = this.defaultVisionModel;
+ this.isVisionModel = true;
+ }
+
+ if (this.isVisionModel && !attachments) {
+ this.modelOptions.model = 'gemini-pro';
+ this.isVisionModel = false;
+ }
+ }
+
formatMessages() {
return ((message) => ({
author: message?.author ?? (message.isCreatedByUser ? this.userLabel : this.modelLabel),
@@ -226,18 +247,45 @@ class GoogleClient extends BaseClient {
})).bind(this);
}
- async buildVisionMessages(messages = [], parentMessageId) {
- const { prompt } = await this.buildMessagesPrompt(messages, parentMessageId);
- const attachments = await this.options.attachments;
+ /**
+ *
+ * Adds image URLs to the message object and returns the files
+ *
+ * @param {TMessage[]} messages
+ * @param {MongoFile[]} files
+ * @returns {Promise}
+ */
+ async addImageURLs(message, attachments) {
const { files, image_urls } = await encodeAndFormat(
this.options.req,
- attachments.filter((file) => file.type.includes('image')),
+ attachments,
EModelEndpoint.google,
);
+ message.image_urls = image_urls.length ? image_urls : undefined;
+ return files;
+ }
+ async buildVisionMessages(messages = [], parentMessageId) {
+ const attachments = await this.options.attachments;
const latestMessage = { ...messages[messages.length - 1] };
+ this.contextHandlers = createContextHandlers(this.options.req, latestMessage.text);
+
+ if (this.contextHandlers) {
+ for (const file of attachments) {
+ if (file.embedded) {
+ this.contextHandlers?.processFile(file);
+ continue;
+ }
+ }
+
+ this.augmentedPrompt = await this.contextHandlers.createContext();
+ this.options.promptPrefix = this.augmentedPrompt + this.options.promptPrefix;
+ }
+
+ const { prompt } = await this.buildMessagesPrompt(messages, parentMessageId);
+
+ const files = await this.addImageURLs(latestMessage, attachments);
- latestMessage.image_urls = image_urls;
this.options.attachments = files;
latestMessage.text = prompt;
@@ -264,7 +312,7 @@ class GoogleClient extends BaseClient {
);
}
- if (this.options.attachments) {
+ if (this.options.attachments && this.isGenerativeModel) {
return this.buildVisionMessages(messages, parentMessageId);
}
diff --git a/api/app/clients/OpenAIClient.js b/api/app/clients/OpenAIClient.js
index ca0c8d84248..f66afda4abd 100644
--- a/api/app/clients/OpenAIClient.js
+++ b/api/app/clients/OpenAIClient.js
@@ -1,21 +1,35 @@
const OpenAI = require('openai');
const { HttpsProxyAgent } = require('https-proxy-agent');
-const { getResponseSender, ImageDetailCost, ImageDetail } = require('librechat-data-provider');
+const {
+ ImageDetail,
+ EModelEndpoint,
+ resolveHeaders,
+ ImageDetailCost,
+ CohereConstants,
+ getResponseSender,
+ validateVisionModel,
+ mapModelToAzureConfig,
+} = require('librechat-data-provider');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
- getModelMaxTokens,
- genAzureChatCompletion,
extractBaseURL,
constructAzureURL,
+ getModelMaxTokens,
+ genAzureChatCompletion,
} = require('~/utils');
-const { encodeAndFormat, validateVisionModel } = require('~/server/services/Files/images');
-const { truncateText, formatMessage, CUT_OFF_PROMPT } = require('./prompts');
+const {
+ truncateText,
+ formatMessage,
+ createContextHandlers,
+ CUT_OFF_PROMPT,
+ titleInstruction,
+} = require('./prompts');
+const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { handleOpenAIErrors } = require('./tools/util');
const spendTokens = require('~/models/spendTokens');
const { createLLM, RunManager } = require('./llm');
const ChatGPTClient = require('./ChatGPTClient');
const { isEnabled } = require('~/server/utils');
-const { getFiles } = require('~/models/File');
const { summaryBuffer } = require('./memory');
const { runTitleChain } = require('./chains');
const { tokenSplit } = require('./document');
@@ -32,7 +46,10 @@ class OpenAIClient extends BaseClient {
super(apiKey, options);
this.ChatGPTClient = new ChatGPTClient();
this.buildPrompt = this.ChatGPTClient.buildPrompt.bind(this);
+ /** @type {getCompletion} */
this.getCompletion = this.ChatGPTClient.getCompletion.bind(this);
+ /** @type {cohereChatCompletion} */
+ this.cohereChatCompletion = this.ChatGPTClient.cohereChatCompletion.bind(this);
this.contextStrategy = options.contextStrategy
? options.contextStrategy.toLowerCase()
: 'discard';
@@ -40,6 +57,10 @@ class OpenAIClient extends BaseClient {
/** @type {AzureOptions} */
this.azure = options.azure || false;
this.setOptions(options);
+ this.metadata = {};
+
+ /** @type {string | undefined} - The API Completions URL */
+ this.completionsUrl;
}
// TODO: PluginsClient calls this 3x, unneeded
@@ -83,7 +104,12 @@ class OpenAIClient extends BaseClient {
};
}
- this.checkVisionRequest(this.options.attachments);
+ this.defaultVisionModel = this.options.visionModel ?? 'gpt-4-vision-preview';
+ if (typeof this.options.attachments?.then === 'function') {
+ this.options.attachments.then((attachments) => this.checkVisionRequest(attachments));
+ } else {
+ this.checkVisionRequest(this.options.attachments);
+ }
const { OPENROUTER_API_KEY, OPENAI_FORCE_PROMPT } = process.env ?? {};
if (OPENROUTER_API_KEY && !this.azure) {
@@ -131,7 +157,13 @@ class OpenAIClient extends BaseClient {
const { isChatGptModel } = this;
this.isUnofficialChatGptModel =
model.startsWith('text-chat') || model.startsWith('text-davinci-002-render');
- this.maxContextTokens = getModelMaxTokens(model) ?? 4095; // 1 less than maximum
+
+ this.maxContextTokens =
+ getModelMaxTokens(
+ model,
+ this.options.endpointType ?? this.options.endpoint,
+ this.options.endpointTokenConfig,
+ ) ?? 4095; // 1 less than maximum
if (this.shouldSummarize) {
this.maxContextTokens = Math.floor(this.maxContextTokens / 2);
@@ -208,13 +240,20 @@ class OpenAIClient extends BaseClient {
* - Sets `this.modelOptions.model` to `gpt-4-vision-preview` if the request is a vision request.
* - Sets `this.isVisionModel` to `true` if vision request.
* - Deletes `this.modelOptions.stop` if vision request.
- * @param {Array | MongoFile[]> | Record} attachments
+ * @param {MongoFile[]} attachments
*/
checkVisionRequest(attachments) {
- this.isVisionModel = validateVisionModel(this.modelOptions.model);
+ const availableModels = this.options.modelsConfig?.[this.options.endpoint];
+ this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
- if (attachments && !this.isVisionModel) {
- this.modelOptions.model = 'gpt-4-vision-preview';
+ const visionModelAvailable = availableModels?.includes(this.defaultVisionModel);
+ if (
+ attachments &&
+ attachments.some((file) => file?.type && file?.type?.includes('image')) &&
+ visionModelAvailable &&
+ !this.isVisionModel
+ ) {
+ this.modelOptions.model = this.defaultVisionModel;
this.isVisionModel = true;
}
@@ -349,7 +388,7 @@ class OpenAIClient extends BaseClient {
return {
chatGptLabel: this.options.chatGptLabel,
promptPrefix: this.options.promptPrefix,
- resendImages: this.options.resendImages,
+ resendFiles: this.options.resendFiles,
imageDetail: this.options.imageDetail,
...this.modelOptions,
};
@@ -363,54 +402,6 @@ class OpenAIClient extends BaseClient {
};
}
- /**
- *
- * @param {TMessage[]} _messages
- * @returns {TMessage[]}
- */
- async addPreviousAttachments(_messages) {
- if (!this.options.resendImages) {
- return _messages;
- }
-
- /**
- *
- * @param {TMessage} message
- */
- const processMessage = async (message) => {
- if (!this.message_file_map) {
- /** @type {Record */
- this.message_file_map = {};
- }
-
- const fileIds = message.files.map((file) => file.file_id);
- const files = await getFiles({
- file_id: { $in: fileIds },
- });
-
- await this.addImageURLs(message, files);
-
- this.message_file_map[message.messageId] = files;
- return message;
- };
-
- const promises = [];
-
- for (const message of _messages) {
- if (!message.files) {
- promises.push(message);
- continue;
- }
-
- promises.push(processMessage(message));
- }
-
- const messages = await Promise.all(promises);
-
- this.checkVisionRequest(this.message_file_map);
- return messages;
- }
-
/**
*
* Adds image URLs to the message object and returns the files
@@ -421,8 +412,7 @@ class OpenAIClient extends BaseClient {
*/
async addImageURLs(message, attachments) {
const { files, image_urls } = await encodeAndFormat(this.options.req, attachments);
-
- message.image_urls = image_urls;
+ message.image_urls = image_urls.length ? image_urls : undefined;
return files;
}
@@ -450,23 +440,9 @@ class OpenAIClient extends BaseClient {
let promptTokens;
promptPrefix = (promptPrefix || this.options.promptPrefix || '').trim();
- if (promptPrefix) {
- promptPrefix = `Instructions:\n${promptPrefix}`;
- instructions = {
- role: 'system',
- name: 'instructions',
- content: promptPrefix,
- };
-
- if (this.contextStrategy) {
- instructions.tokenCount = this.getTokenCountForMessage(instructions);
- }
- }
if (this.options.attachments) {
- const attachments = (await this.options.attachments).filter((file) =>
- file.type.includes('image'),
- );
+ const attachments = await this.options.attachments;
if (this.message_file_map) {
this.message_file_map[orderedMessages[orderedMessages.length - 1].messageId] = attachments;
@@ -484,6 +460,13 @@ class OpenAIClient extends BaseClient {
this.options.attachments = files;
}
+ if (this.message_file_map) {
+ this.contextHandlers = createContextHandlers(
+ this.options.req,
+ orderedMessages[orderedMessages.length - 1].text,
+ );
+ }
+
const formattedMessages = orderedMessages.map((message, i) => {
const formattedMessage = formatMessage({
message,
@@ -502,6 +485,11 @@ class OpenAIClient extends BaseClient {
if (this.message_file_map && this.message_file_map[message.messageId]) {
const attachments = this.message_file_map[message.messageId];
for (const file of attachments) {
+ if (file.embedded) {
+ this.contextHandlers?.processFile(file);
+ continue;
+ }
+
orderedMessages[i].tokenCount += this.calculateImageTokenCost({
width: file.width,
height: file.height,
@@ -513,6 +501,24 @@ class OpenAIClient extends BaseClient {
return formattedMessage;
});
+ if (this.contextHandlers) {
+ this.augmentedPrompt = await this.contextHandlers.createContext();
+ promptPrefix = this.augmentedPrompt + promptPrefix;
+ }
+
+ if (promptPrefix) {
+ promptPrefix = `Instructions:\n${promptPrefix.trim()}`;
+ instructions = {
+ role: 'system',
+ name: 'instructions',
+ content: promptPrefix,
+ };
+
+ if (this.contextStrategy) {
+ instructions.tokenCount = this.getTokenCountForMessage(instructions);
+ }
+ }
+
// TODO: need to handle interleaving instructions better
if (this.contextStrategy) {
({ payload, tokenCountMap, promptTokens, messages } = await this.handleContextStrategy({
@@ -540,15 +546,16 @@ class OpenAIClient extends BaseClient {
return result;
}
+ /** @type {sendCompletion} */
async sendCompletion(payload, opts = {}) {
let reply = '';
let result = null;
let streamResult = null;
this.modelOptions.user = this.user;
const invalidBaseUrl = this.completionsUrl && extractBaseURL(this.completionsUrl) === null;
- const useOldMethod = !!(invalidBaseUrl || !this.isChatCompletion);
+ const useOldMethod = !!(invalidBaseUrl || !this.isChatCompletion || typeof Bun !== 'undefined');
if (typeof opts.onProgress === 'function' && useOldMethod) {
- await this.getCompletion(
+ const completionResult = await this.getCompletion(
payload,
(progressMessage) => {
if (progressMessage === '[DONE]') {
@@ -581,12 +588,16 @@ class OpenAIClient extends BaseClient {
opts.onProgress(token);
reply += token;
},
+ opts.onProgress,
opts.abortController || new AbortController(),
);
+
+ if (completionResult && typeof completionResult === 'string') {
+ reply = completionResult;
+ }
} else if (typeof opts.onProgress === 'function' || this.options.useChatCompletion) {
reply = await this.chatCompletion({
payload,
- clientOptions: opts,
onProgress: opts.onProgress,
abortController: opts.abortController,
});
@@ -594,9 +605,14 @@ class OpenAIClient extends BaseClient {
result = await this.getCompletion(
payload,
null,
+ opts.onProgress,
opts.abortController || new AbortController(),
);
+ if (result && typeof result === 'string') {
+ return result.trim();
+ }
+
logger.debug('[OpenAIClient] sendCompletion: result', result);
if (this.isChatCompletion) {
@@ -606,11 +622,11 @@ class OpenAIClient extends BaseClient {
}
}
- if (streamResult && typeof opts.addMetadata === 'function') {
+ if (streamResult) {
const { finish_reason } = streamResult.choices[0];
- opts.addMetadata({ finish_reason });
+ this.metadata = { finish_reason };
}
- return reply.trim();
+ return (reply ?? '').trim();
}
initializeLLM({
@@ -624,6 +640,7 @@ class OpenAIClient extends BaseClient {
context,
tokenBuffer,
initialMessageCount,
+ conversationId,
}) {
const modelOptions = {
modelName: modelName ?? model,
@@ -653,6 +670,16 @@ class OpenAIClient extends BaseClient {
};
}
+ const { headers } = this.options;
+ if (headers && typeof headers === 'object' && !Array.isArray(headers)) {
+ configOptions.baseOptions = {
+ headers: resolveHeaders({
+ ...headers,
+ ...configOptions?.baseOptions?.headers,
+ }),
+ };
+ }
+
if (this.options.proxy) {
configOptions.httpAgent = new HttpsProxyAgent(this.options.proxy);
configOptions.httpsAgent = new HttpsProxyAgent(this.options.proxy);
@@ -671,7 +698,7 @@ class OpenAIClient extends BaseClient {
callbacks: runManager.createCallbacks({
context,
tokenBuffer,
- conversationId: this.conversationId,
+ conversationId: this.conversationId ?? conversationId,
initialMessageCount,
}),
});
@@ -687,12 +714,13 @@ class OpenAIClient extends BaseClient {
*
* @param {Object} params - The parameters for the conversation title generation.
* @param {string} params.text - The user's input.
+ * @param {string} [params.conversationId] - The current conversationId, if not already defined on client initialization.
* @param {string} [params.responseText=''] - The AI's immediate response to the user.
*
* @returns {Promise} A promise that resolves to the generated conversation title.
* In case of failure, it will return the default title, "New Chat".
*/
- async titleConvo({ text, responseText = '' }) {
+ async titleConvo({ text, conversationId, responseText = '' }) {
let title = 'New Chat';
const convo = `||>User:
"${truncateText(text)}"
@@ -712,6 +740,39 @@ class OpenAIClient extends BaseClient {
max_tokens: 16,
};
+ /** @type {TAzureConfig | undefined} */
+ const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
+
+ const resetTitleOptions = !!(
+ (this.azure && azureConfig) ||
+ (azureConfig && this.options.endpoint === EModelEndpoint.azureOpenAI)
+ );
+
+ if (resetTitleOptions) {
+ const { modelGroupMap, groupMap } = azureConfig;
+ const {
+ azureOptions,
+ baseURL,
+ headers = {},
+ serverless,
+ } = mapModelToAzureConfig({
+ modelName: modelOptions.model,
+ modelGroupMap,
+ groupMap,
+ });
+
+ this.options.headers = resolveHeaders(headers);
+ this.options.reverseProxyUrl = baseURL ?? null;
+ this.langchainProxy = extractBaseURL(this.options.reverseProxyUrl);
+ this.apiKey = azureOptions.azureOpenAIApiKey;
+
+ const groupName = modelGroupMap[modelOptions.model].group;
+ this.options.addParams = azureConfig.groupMap[groupName].addParams;
+ this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
+ this.options.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;
+ this.azure = !serverless && azureOptions;
+ }
+
const titleChatCompletion = async () => {
modelOptions.model = model;
@@ -723,8 +784,7 @@ class OpenAIClient extends BaseClient {
const instructionsPayload = [
{
role: 'system',
- content: `Detect user language and write in the same language an extremely concise title for this conversation, which you must accurately detect.
-Write in the detected language. Title in 5 Words or Less. No Punctuation or Quotation. Do not mention the language. All first letters of every word should be capitalized and write the title in User Language only.
+ content: `Please generate ${titleInstruction}
${convo}
@@ -732,10 +792,18 @@ ${convo}
},
];
+ const promptTokens = this.getTokenCountForMessage(instructionsPayload[0]);
+
try {
+ let useChatCompletion = true;
+ if (this.options.reverseProxyUrl === CohereConstants.API_URL) {
+ useChatCompletion = false;
+ }
title = (
- await this.sendPayload(instructionsPayload, { modelOptions, useChatCompletion: true })
+ await this.sendPayload(instructionsPayload, { modelOptions, useChatCompletion })
).replaceAll('"', '');
+ const completionTokens = this.getTokenCount(title);
+ this.recordTokenUsage({ promptTokens, completionTokens, context: 'title' });
} catch (e) {
logger.error(
'[OpenAIClient] There was an issue generating the title with the completion method',
@@ -752,7 +820,12 @@ ${convo}
try {
this.abortController = new AbortController();
- const llm = this.initializeLLM({ ...modelOptions, context: 'title', tokenBuffer: 150 });
+ const llm = this.initializeLLM({
+ ...modelOptions,
+ conversationId,
+ context: 'title',
+ tokenBuffer: 150,
+ });
title = await runTitleChain({ llm, text, convo, signal: this.abortController.signal });
} catch (e) {
if (e?.message?.toLowerCase()?.includes('abort')) {
@@ -779,7 +852,12 @@ ${convo}
// TODO: remove the gpt fallback and make it specific to endpoint
const { OPENAI_SUMMARY_MODEL = 'gpt-3.5-turbo' } = process.env ?? {};
const model = this.options.summaryModel ?? OPENAI_SUMMARY_MODEL;
- const maxContextTokens = getModelMaxTokens(model) ?? 4095;
+ const maxContextTokens =
+ getModelMaxTokens(
+ model,
+ this.options.endpointType ?? this.options.endpoint,
+ this.options.endpointTokenConfig,
+ ) ?? 4095; // 1 less than maximum
// 3 tokens for the assistant label, and 98 for the summarizer prompt (101)
let promptBuffer = 101;
@@ -877,14 +955,14 @@ ${convo}
}
}
- async recordTokenUsage({ promptTokens, completionTokens }) {
- logger.debug('[OpenAIClient] recordTokenUsage:', { promptTokens, completionTokens });
+ async recordTokenUsage({ promptTokens, completionTokens, context = 'message' }) {
await spendTokens(
{
+ context,
user: this.user,
model: this.modelOptions.model,
- context: 'message',
conversationId: this.conversationId,
+ endpointTokenConfig: this.options.endpointTokenConfig,
},
{ promptTokens, completionTokens },
);
@@ -897,7 +975,7 @@ ${convo}
});
}
- async chatCompletion({ payload, onProgress, clientOptions, abortController = null }) {
+ async chatCompletion({ payload, onProgress, abortController = null }) {
let error = null;
const errorCallback = (err) => (error = err);
let intermediateReply = '';
@@ -918,15 +996,6 @@ ${convo}
}
const baseURL = extractBaseURL(this.completionsUrl);
- // let { messages: _msgsToLog, ...modelOptionsToLog } = modelOptions;
- // if (modelOptionsToLog.messages) {
- // _msgsToLog = modelOptionsToLog.messages.map((msg) => {
- // let { content, ...rest } = msg;
-
- // if (content)
- // return { ...rest, content: truncateText(content) };
- // });
- // }
logger.debug('[OpenAIClient] chatCompletion', { baseURL, modelOptions });
const opts = {
baseURL,
@@ -951,6 +1020,38 @@ ${convo}
modelOptions.max_tokens = 4000;
}
+ /** @type {TAzureConfig | undefined} */
+ const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
+
+ if (
+ (this.azure && this.isVisionModel && azureConfig) ||
+ (azureConfig && this.isVisionModel && this.options.endpoint === EModelEndpoint.azureOpenAI)
+ ) {
+ const { modelGroupMap, groupMap } = azureConfig;
+ const {
+ azureOptions,
+ baseURL,
+ headers = {},
+ serverless,
+ } = mapModelToAzureConfig({
+ modelName: modelOptions.model,
+ modelGroupMap,
+ groupMap,
+ });
+ opts.defaultHeaders = resolveHeaders(headers);
+ this.langchainProxy = extractBaseURL(baseURL);
+ this.apiKey = azureOptions.azureOpenAIApiKey;
+
+ const groupName = modelGroupMap[modelOptions.model].group;
+ this.options.addParams = azureConfig.groupMap[groupName].addParams;
+ this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
+ // Note: `forcePrompt` not re-assigned as only chat models are vision models
+
+ this.azure = !serverless && azureOptions;
+ this.azureEndpoint =
+ !serverless && genAzureChatCompletion(this.azure, modelOptions.model, this);
+ }
+
if (this.azure || this.options.azure) {
// Azure does not accept `model` in the body, so we need to remove it.
delete modelOptions.model;
@@ -958,9 +1059,10 @@ ${convo}
opts.baseURL = this.langchainProxy
? constructAzureURL({
baseURL: this.langchainProxy,
- azure: this.azure,
+ azureOptions: this.azure,
})
- : this.azureEndpoint.split(/\/(chat|completion)/)[0];
+ : this.azureEndpoint.split(/(? msg.role === 'system');
+
+ if (systemMessageIndex > 0) {
+ const [systemMessage] = messages.splice(systemMessageIndex, 1);
+ messages.unshift(systemMessage);
+ }
+
+ modelOptions.messages = messages;
+
if (messages.length === 1 && messages[0].role === 'system') {
modelOptions.messages[0].role = 'user';
}
@@ -988,12 +1104,20 @@ ${convo}
...modelOptions,
...this.options.addParams,
};
+ logger.debug('[OpenAIClient] chatCompletion: added params', {
+ addParams: this.options.addParams,
+ modelOptions,
+ });
}
if (this.options.dropParams && Array.isArray(this.options.dropParams)) {
this.options.dropParams.forEach((param) => {
delete modelOptions[param];
});
+ logger.debug('[OpenAIClient] chatCompletion: dropped params', {
+ dropParams: this.options.dropParams,
+ modelOptions,
+ });
}
let UnexpectedRoleError = false;
@@ -1009,6 +1133,16 @@ ${convo}
.on('error', (err) => {
handleOpenAIErrors(err, errorCallback, 'stream');
})
+ .on('finalChatCompletion', (finalChatCompletion) => {
+ const finalMessage = finalChatCompletion?.choices?.[0]?.message;
+ if (finalMessage && finalMessage?.role !== 'assistant') {
+ finalChatCompletion.choices[0].message.role = 'assistant';
+ }
+
+ if (finalMessage && !finalMessage?.content?.trim()) {
+ finalChatCompletion.choices[0].message.content = intermediateReply;
+ }
+ })
.on('finalMessage', (message) => {
if (message?.role !== 'assistant') {
stream.messages.push({ role: 'assistant', content: intermediateReply });
@@ -1054,12 +1188,20 @@ ${convo}
}
const { message, finish_reason } = chatCompletion.choices[0];
- if (chatCompletion && typeof clientOptions.addMetadata === 'function') {
- clientOptions.addMetadata({ finish_reason });
+ if (chatCompletion) {
+ this.metadata = { finish_reason };
}
logger.debug('[OpenAIClient] chatCompletion response', chatCompletion);
+ if (!message?.content?.trim() && intermediateReply.length) {
+ logger.debug(
+ '[OpenAIClient] chatCompletion: using intermediateReply due to empty message.content',
+ { intermediateReply },
+ );
+ return intermediateReply;
+ }
+
return message.content;
} catch (err) {
if (
@@ -1072,6 +1214,9 @@ ${convo}
err?.message?.includes(
'OpenAI error: Invalid final message: OpenAI expects final message to include role=assistant',
) ||
+ err?.message?.includes(
+ 'stream ended without producing a ChatCompletionMessage with role=assistant',
+ ) ||
err?.message?.includes('The server had an error processing your request') ||
err?.message?.includes('missing finish_reason') ||
err?.message?.includes('missing role') ||
diff --git a/api/app/clients/PluginsClient.js b/api/app/clients/PluginsClient.js
index 6118c3547a1..033c122664f 100644
--- a/api/app/clients/PluginsClient.js
+++ b/api/app/clients/PluginsClient.js
@@ -3,6 +3,7 @@ const { CallbackManager } = require('langchain/callbacks');
const { BufferMemory, ChatMessageHistory } = require('langchain/memory');
const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents');
const { addImages, buildErrorInput, buildPromptPrefix } = require('./output_parsers');
+const { processFileURL } = require('~/server/services/Files/process');
const { EModelEndpoint } = require('librechat-data-provider');
const { formatLangChainMessages } = require('./prompts');
const checkBalance = require('~/models/checkBalance');
@@ -30,10 +31,6 @@ class PluginsClient extends OpenAIClient {
super.setOptions(options);
- if (this.functionsAgent && this.agentOptions.model && !this.useOpenRouter) {
- this.agentOptions.model = this.getFunctionModelName(this.agentOptions.model);
- }
-
this.isGpt3 = this.modelOptions?.model?.includes('gpt-3');
if (this.options.reverseProxyUrl) {
@@ -113,6 +110,7 @@ class PluginsClient extends OpenAIClient {
openAIApiKey: this.openAIApiKey,
conversationId: this.conversationId,
fileStrategy: this.options.req.app.locals.fileStrategy,
+ processFileURL,
message,
},
});
diff --git a/api/app/clients/llm/createCoherePayload.js b/api/app/clients/llm/createCoherePayload.js
new file mode 100644
index 00000000000..58803d76f3c
--- /dev/null
+++ b/api/app/clients/llm/createCoherePayload.js
@@ -0,0 +1,85 @@
+const { CohereConstants } = require('librechat-data-provider');
+const { titleInstruction } = require('../prompts/titlePrompts');
+
+// Mapping OpenAI roles to Cohere roles
+const roleMap = {
+ user: CohereConstants.ROLE_USER,
+ assistant: CohereConstants.ROLE_CHATBOT,
+ system: CohereConstants.ROLE_SYSTEM, // Recognize and map the system role explicitly
+};
+
+/**
+ * Adjusts an OpenAI ChatCompletionPayload to conform with Cohere's expected chat payload format.
+ * Now includes handling for "system" roles explicitly mentioned.
+ *
+ * @param {Object} options - Object containing the model options.
+ * @param {ChatCompletionPayload} options.modelOptions - The OpenAI model payload options.
+ * @returns {CohereChatStreamRequest} Cohere-compatible chat API payload.
+ */
+function createCoherePayload({ modelOptions }) {
+ /** @type {string | undefined} */
+ let preamble;
+ let latestUserMessageContent = '';
+ const {
+ stream,
+ stop,
+ top_p,
+ temperature,
+ frequency_penalty,
+ presence_penalty,
+ max_tokens,
+ messages,
+ model,
+ ...rest
+ } = modelOptions;
+
+ // Filter out the latest user message and transform remaining messages to Cohere's chat_history format
+ let chatHistory = messages.reduce((acc, message, index, arr) => {
+ const isLastUserMessage = index === arr.length - 1 && message.role === 'user';
+
+ const messageContent =
+ typeof message.content === 'string'
+ ? message.content
+ : message.content.map((part) => (part.type === 'text' ? part.text : '')).join(' ');
+
+ if (isLastUserMessage) {
+ latestUserMessageContent = messageContent;
+ } else {
+ acc.push({
+ role: roleMap[message.role] || CohereConstants.ROLE_USER,
+ message: messageContent,
+ });
+ }
+
+ return acc;
+ }, []);
+
+ if (
+ chatHistory.length === 1 &&
+ chatHistory[0].role === CohereConstants.ROLE_SYSTEM &&
+ !latestUserMessageContent.length
+ ) {
+ const message = chatHistory[0].message;
+ latestUserMessageContent = message.includes(titleInstruction)
+ ? CohereConstants.TITLE_MESSAGE
+ : '.';
+ preamble = message;
+ }
+
+ return {
+ message: latestUserMessageContent,
+ model: model,
+ chatHistory,
+ stream: stream ?? false,
+ temperature: temperature,
+ frequencyPenalty: frequency_penalty,
+ presencePenalty: presence_penalty,
+ maxTokens: max_tokens,
+ stopSequences: stop,
+ preamble,
+ p: top_p,
+ ...rest,
+ };
+}
+
+module.exports = createCoherePayload;
diff --git a/api/app/clients/llm/createLLM.js b/api/app/clients/llm/createLLM.js
index 62f2fe86f95..09b29cca8e9 100644
--- a/api/app/clients/llm/createLLM.js
+++ b/api/app/clients/llm/createLLM.js
@@ -55,16 +55,18 @@ function createLLM({
}
if (azure && configOptions.basePath) {
- configOptions.basePath = constructAzureURL({
+ const azureURL = constructAzureURL({
baseURL: configOptions.basePath,
- azure: azureOptions,
+ azureOptions,
});
+ azureOptions.azureOpenAIBasePath = azureURL.split(
+ `/${azureOptions.azureOpenAIApiDeploymentName}`,
+ )[0];
}
return new ChatOpenAI(
{
streaming,
- verbose: true,
credentials,
configuration,
...azureOptions,
diff --git a/api/app/clients/llm/index.js b/api/app/clients/llm/index.js
index 46478ade63b..2e09bbb841b 100644
--- a/api/app/clients/llm/index.js
+++ b/api/app/clients/llm/index.js
@@ -1,7 +1,9 @@
const createLLM = require('./createLLM');
const RunManager = require('./RunManager');
+const createCoherePayload = require('./createCoherePayload');
module.exports = {
createLLM,
RunManager,
+ createCoherePayload,
};
diff --git a/api/app/clients/prompts/createContextHandlers.js b/api/app/clients/prompts/createContextHandlers.js
new file mode 100644
index 00000000000..e48dfd8e672
--- /dev/null
+++ b/api/app/clients/prompts/createContextHandlers.js
@@ -0,0 +1,159 @@
+const axios = require('axios');
+const { isEnabled } = require('~/server/utils');
+const { logger } = require('~/config');
+
+const footer = `Use the context as your learned knowledge to better answer the user.
+
+In your response, remember to follow these guidelines:
+- If you don't know the answer, simply say that you don't know.
+- If you are unsure how to answer, ask for clarification.
+- Avoid mentioning that you obtained the information from the context.
+
+Answer appropriately in the user's language.
+`;
+
+function createContextHandlers(req, userMessageContent) {
+ if (!process.env.RAG_API_URL) {
+ return;
+ }
+
+ const queryPromises = [];
+ const processedFiles = [];
+ const processedIds = new Set();
+ const jwtToken = req.headers.authorization.split(' ')[1];
+ const useFullContext = isEnabled(process.env.RAG_USE_FULL_CONTEXT);
+
+ const query = async (file) => {
+ if (useFullContext) {
+ return axios.get(`${process.env.RAG_API_URL}/documents/${file.file_id}/context`, {
+ headers: {
+ Authorization: `Bearer ${jwtToken}`,
+ },
+ });
+ }
+
+ return axios.post(
+ `${process.env.RAG_API_URL}/query`,
+ {
+ file_id: file.file_id,
+ query: userMessageContent,
+ k: 4,
+ },
+ {
+ headers: {
+ Authorization: `Bearer ${jwtToken}`,
+ 'Content-Type': 'application/json',
+ },
+ },
+ );
+ };
+
+ const processFile = async (file) => {
+ if (file.embedded && !processedIds.has(file.file_id)) {
+ try {
+ const promise = query(file);
+ queryPromises.push(promise);
+ processedFiles.push(file);
+ processedIds.add(file.file_id);
+ } catch (error) {
+ logger.error(`Error processing file ${file.filename}:`, error);
+ }
+ }
+ };
+
+ const createContext = async () => {
+ try {
+ if (!queryPromises.length || !processedFiles.length) {
+ return '';
+ }
+
+ const oneFile = processedFiles.length === 1;
+ const header = `The user has attached ${oneFile ? 'a' : processedFiles.length} file${
+ !oneFile ? 's' : ''
+ } to the conversation:`;
+
+      const files = `${
+        oneFile
+          ? ''
+          : `
+      <files>`
+      }${processedFiles
+        .map(
+          (file) => `
+            <file>
+              <filename>${file.filename}</filename>
+              <type>${file.type}</type>
+            </file>`,
+        )
+        .join('')}${
+        oneFile
+          ? ''
+          : `
+      </files>`
+      }`;
+
+ const resolvedQueries = await Promise.all(queryPromises);
+
+ const context = resolvedQueries
+ .map((queryResult, index) => {
+ const file = processedFiles[index];
+ let contextItems = queryResult.data;
+
+          const generateContext = (currentContext) =>
+            `
+          <file>
+            <filename>${file.filename}</filename>
+            <context>${currentContext}</context>
+          </file>
+          `;
+
+ if (useFullContext) {
+ return generateContext(`\n${contextItems}`);
+ }
+
+          contextItems = queryResult.data
+            .map((item) => {
+              const pageContent = item[0].page_content;
+              return `
+          <contextItem>
+            <![CDATA[${pageContent}]]>
+          </contextItem>`;
+            })
+            .join('');
+
+ return generateContext(contextItems);
+ })
+ .join('');
+
+ if (useFullContext) {
+ const prompt = `${header}
+ ${context}
+ ${footer}`;
+
+ return prompt;
+ }
+
+ const prompt = `${header}
+ ${files}
+
+      A semantic search was executed with the user's message as the query, retrieving the following context inside <context></context> XML tags.
+      <context>
+      ${context}
+      </context>
+
+ ${footer}`;
+
+ return prompt;
+ } catch (error) {
+ logger.error('Error creating context:', error);
+ throw error;
+ }
+ };
+
+ return {
+ processFile,
+ createContext,
+ };
+}
+
+module.exports = createContextHandlers;
diff --git a/api/app/clients/prompts/createVisionPrompt.js b/api/app/clients/prompts/createVisionPrompt.js
new file mode 100644
index 00000000000..5d8a7bbf51b
--- /dev/null
+++ b/api/app/clients/prompts/createVisionPrompt.js
@@ -0,0 +1,34 @@
+/**
+ * Generates a prompt instructing the user to describe an image in detail, tailored to different types of visual content.
+ * @param {boolean} pluralized - Whether to pluralize the prompt for multiple images.
+ * @returns {string} - The generated vision prompt.
+ */
+const createVisionPrompt = (pluralized = false) => {
+ return `Please describe the image${
+ pluralized ? 's' : ''
+ } in detail, covering relevant aspects such as:
+
+ For photographs, illustrations, or artwork:
+ - The main subject(s) and their appearance, positioning, and actions
+ - The setting, background, and any notable objects or elements
+ - Colors, lighting, and overall mood or atmosphere
+ - Any interesting details, textures, or patterns
+ - The style, technique, or medium used (if discernible)
+
+ For screenshots or images containing text:
+ - The content and purpose of the text
+ - The layout, formatting, and organization of the information
+ - Any notable visual elements, such as logos, icons, or graphics
+ - The overall context or message conveyed by the screenshot
+
+ For graphs, charts, or data visualizations:
+ - The type of graph or chart (e.g., bar graph, line chart, pie chart)
+ - The variables being compared or analyzed
+ - Any trends, patterns, or outliers in the data
+ - The axis labels, scales, and units of measurement
+ - The title, legend, and any additional context provided
+
+ Be as specific and descriptive as possible while maintaining clarity and concision.`;
+};
+
+module.exports = createVisionPrompt;
diff --git a/api/app/clients/prompts/formatMessages.js b/api/app/clients/prompts/formatMessages.js
index 1b97bc7ffa1..c19eee260af 100644
--- a/api/app/clients/prompts/formatMessages.js
+++ b/api/app/clients/prompts/formatMessages.js
@@ -1,3 +1,4 @@
+const { EModelEndpoint } = require('librechat-data-provider');
const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');
/**
@@ -7,10 +8,16 @@ const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');
* @param {Object} params.message - The message object to format.
* @param {string} [params.message.role] - The role of the message sender (must be 'user').
* @param {string} [params.message.content] - The text content of the message.
+ * @param {EModelEndpoint} [params.endpoint] - Identifier for specific endpoint handling
* @param {Array} [params.image_urls] - The image_urls to attach to the message.
* @returns {(Object)} - The formatted message.
*/
-const formatVisionMessage = ({ message, image_urls }) => {
+const formatVisionMessage = ({ message, image_urls, endpoint }) => {
+ if (endpoint === EModelEndpoint.anthropic) {
+ message.content = [...image_urls, { type: 'text', text: message.content }];
+ return message;
+ }
+
message.content = [{ type: 'text', text: message.content }, ...image_urls];
return message;
@@ -29,10 +36,11 @@ const formatVisionMessage = ({ message, image_urls }) => {
* @param {Array} [params.message.image_urls] - The image_urls attached to the message for Vision API.
* @param {string} [params.userName] - The name of the user.
* @param {string} [params.assistantName] - The name of the assistant.
+ * @param {string} [params.endpoint] - Identifier for specific endpoint handling
* @param {boolean} [params.langChain=false] - Whether to return a LangChain message object.
* @returns {(Object|HumanMessage|AIMessage|SystemMessage)} - The formatted message.
*/
-const formatMessage = ({ message, userName, assistantName, langChain = false }) => {
+const formatMessage = ({ message, userName, assistantName, endpoint, langChain = false }) => {
let { role: _role, _name, sender, text, content: _content, lc_id } = message;
if (lc_id && lc_id[2] && !langChain) {
const roleMapping = {
@@ -51,7 +59,11 @@ const formatMessage = ({ message, userName, assistantName, langChain = false })
const { image_urls } = message;
if (Array.isArray(image_urls) && image_urls.length > 0 && role === 'user') {
- return formatVisionMessage({ message: formattedMessage, image_urls: message.image_urls });
+ return formatVisionMessage({
+ message: formattedMessage,
+ image_urls: message.image_urls,
+ endpoint,
+ });
}
if (_name) {
diff --git a/api/app/clients/prompts/formatMessages.spec.js b/api/app/clients/prompts/formatMessages.spec.js
index 636cdb1c8e5..8d4956b3811 100644
--- a/api/app/clients/prompts/formatMessages.spec.js
+++ b/api/app/clients/prompts/formatMessages.spec.js
@@ -1,5 +1,6 @@
-const { formatMessage, formatLangChainMessages, formatFromLangChain } = require('./formatMessages');
+const { Constants } = require('librechat-data-provider');
const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');
+const { formatMessage, formatLangChainMessages, formatFromLangChain } = require('./formatMessages');
describe('formatMessage', () => {
it('formats user message', () => {
@@ -61,7 +62,7 @@ describe('formatMessage', () => {
isCreatedByUser: true,
isEdited: false,
model: null,
- parentMessageId: '00000000-0000-0000-0000-000000000000',
+ parentMessageId: Constants.NO_PARENT,
sender: 'User',
text: 'hi',
tokenCount: 5,
diff --git a/api/app/clients/prompts/index.js b/api/app/clients/prompts/index.js
index 40db3d90439..36bb6f7e283 100644
--- a/api/app/clients/prompts/index.js
+++ b/api/app/clients/prompts/index.js
@@ -4,6 +4,8 @@ const handleInputs = require('./handleInputs');
const instructions = require('./instructions');
const titlePrompts = require('./titlePrompts');
const truncateText = require('./truncateText');
+const createVisionPrompt = require('./createVisionPrompt');
+const createContextHandlers = require('./createContextHandlers');
module.exports = {
...formatMessages,
@@ -12,4 +14,6 @@ module.exports = {
...instructions,
...titlePrompts,
truncateText,
+ createVisionPrompt,
+ createContextHandlers,
};
diff --git a/api/app/clients/prompts/titlePrompts.js b/api/app/clients/prompts/titlePrompts.js
index 1e893ba295d..83d8909f3a7 100644
--- a/api/app/clients/prompts/titlePrompts.js
+++ b/api/app/clients/prompts/titlePrompts.js
@@ -27,7 +27,63 @@ ${convo}`,
return titlePrompt;
};
+const titleInstruction =
+ 'a concise, 5-word-or-less title for the conversation, using its same language, with no punctuation. Apply title case conventions appropriate for the language. For English, use AP Stylebook Title Case. Never directly mention the language name or the word "title"';
+const titleFunctionPrompt = `In this environment you have access to a set of tools you can use to generate the conversation title.
+
+You may call them like this:
+<function_calls>
+<invoke>
+<tool_name>$TOOL_NAME</tool_name>
+<parameters>
+<$PARAMETER_NAME>$PARAMETER_VALUE</$PARAMETER_NAME>
+...
+</parameters>
+</invoke>
+</function_calls>
+
+Here are the tools available:
+<tools>
+<tool_description>
+<tool_name>submit_title</tool_name>
+<description>
+Submit a brief title in the conversation's language, following the parameter description closely.
+</description>
+<parameters>
+<parameter>
+<name>title</name>
+<type>string</type>
+<description>${titleInstruction}</description>
+</parameter>
+</parameters>
+</tool_description>
+</tools>`;
+
+/**
+ * Parses titles from title functions based on the provided prompt.
+ * @param {string} prompt - The prompt containing the title function.
+ * @returns {string} The parsed title. "New Chat" if no title is found.
+ */
+function parseTitleFromPrompt(prompt) {
+  const titleRegex = /<title>(.+?)<\/title>/;
+ const titleMatch = prompt.match(titleRegex);
+
+ if (titleMatch && titleMatch[1]) {
+ const title = titleMatch[1].trim();
+
+ // // Capitalize the first letter of each word; Note: unnecessary due to title case prompting
+ // const capitalizedTitle = title.replace(/\b\w/g, (char) => char.toUpperCase());
+
+ return title;
+ }
+
+ return 'New Chat';
+}
+
module.exports = {
langPrompt,
+ titleInstruction,
createTitlePrompt,
+ titleFunctionPrompt,
+ parseTitleFromPrompt,
};
diff --git a/api/app/clients/specs/BaseClient.test.js b/api/app/clients/specs/BaseClient.test.js
index 889499fbc29..9ffa7e04f1b 100644
--- a/api/app/clients/specs/BaseClient.test.js
+++ b/api/app/clients/specs/BaseClient.test.js
@@ -1,3 +1,4 @@
+const { Constants } = require('librechat-data-provider');
const { initializeFakeClient } = require('./FakeClient');
jest.mock('../../../lib/db/connectDb');
@@ -307,7 +308,7 @@ describe('BaseClient', () => {
const unorderedMessages = [
{ id: '3', parentMessageId: '2', text: 'Message 3' },
{ id: '2', parentMessageId: '1', text: 'Message 2' },
- { id: '1', parentMessageId: '00000000-0000-0000-0000-000000000000', text: 'Message 1' },
+ { id: '1', parentMessageId: Constants.NO_PARENT, text: 'Message 1' },
];
it('should return ordered messages based on parentMessageId', () => {
@@ -316,7 +317,7 @@ describe('BaseClient', () => {
parentMessageId: '3',
});
expect(result).toEqual([
- { id: '1', parentMessageId: '00000000-0000-0000-0000-000000000000', text: 'Message 1' },
+ { id: '1', parentMessageId: Constants.NO_PARENT, text: 'Message 1' },
{ id: '2', parentMessageId: '1', text: 'Message 2' },
{ id: '3', parentMessageId: '2', text: 'Message 3' },
]);
diff --git a/api/app/clients/specs/PluginsClient.test.js b/api/app/clients/specs/PluginsClient.test.js
index b4e42b1fc51..dfd57b23b94 100644
--- a/api/app/clients/specs/PluginsClient.test.js
+++ b/api/app/clients/specs/PluginsClient.test.js
@@ -1,9 +1,10 @@
+const crypto = require('crypto');
+const { Constants } = require('librechat-data-provider');
const { HumanChatMessage, AIChatMessage } = require('langchain/schema');
const PluginsClient = require('../PluginsClient');
-const crypto = require('crypto');
-jest.mock('../../../lib/db/connectDb');
-jest.mock('../../../models/Conversation', () => {
+jest.mock('~/lib/db/connectDb');
+jest.mock('~/models/Conversation', () => {
return function () {
return {
save: jest.fn(),
@@ -12,6 +13,12 @@ jest.mock('../../../models/Conversation', () => {
};
});
+const defaultAzureOptions = {
+ azureOpenAIApiInstanceName: 'your-instance-name',
+ azureOpenAIApiDeploymentName: 'your-deployment-name',
+ azureOpenAIApiVersion: '2020-07-01-preview',
+};
+
describe('PluginsClient', () => {
let TestAgent;
let options = {
@@ -60,7 +67,7 @@ describe('PluginsClient', () => {
TestAgent.setOptions(opts);
}
const conversationId = opts.conversationId || crypto.randomUUID();
- const parentMessageId = opts.parentMessageId || '00000000-0000-0000-0000-000000000000';
+ const parentMessageId = opts.parentMessageId || Constants.NO_PARENT;
const userMessageId = opts.overrideParentMessageId || crypto.randomUUID();
this.pastMessages = await TestAgent.loadHistory(
conversationId,
@@ -187,4 +194,30 @@ describe('PluginsClient', () => {
expect(client.getFunctionModelName('')).toBe('gpt-3.5-turbo');
});
});
+ describe('Azure OpenAI tests specific to Plugins', () => {
+ // TODO: add more tests for Azure OpenAI integration with Plugins
+ // let client;
+ // beforeEach(() => {
+ // client = new PluginsClient('dummy_api_key');
+ // });
+
+ test('should not call getFunctionModelName when azure options are set', () => {
+ const spy = jest.spyOn(PluginsClient.prototype, 'getFunctionModelName');
+ const model = 'gpt-4-turbo';
+
+ // note, without the azure change in PR #1766, `getFunctionModelName` is called twice
+ const testClient = new PluginsClient('dummy_api_key', {
+ agentOptions: {
+ model,
+ agent: 'functions',
+ },
+ azure: defaultAzureOptions,
+ });
+
+ expect(spy).not.toHaveBeenCalled();
+ expect(testClient.agentOptions.model).toBe(model);
+
+ spy.mockRestore();
+ });
+ });
});
diff --git a/api/app/clients/tools/DALL-E.js b/api/app/clients/tools/DALL-E.js
index 4eca7f7932e..4600bdb026e 100644
--- a/api/app/clients/tools/DALL-E.js
+++ b/api/app/clients/tools/DALL-E.js
@@ -3,42 +3,39 @@ const OpenAI = require('openai');
const { v4: uuidv4 } = require('uuid');
const { Tool } = require('langchain/tools');
const { HttpsProxyAgent } = require('https-proxy-agent');
+const { FileContext } = require('librechat-data-provider');
const { getImageBasename } = require('~/server/services/Files/images');
-const { processFileURL } = require('~/server/services/Files/process');
const extractBaseURL = require('~/utils/extractBaseURL');
const { logger } = require('~/config');
-const {
- DALLE2_SYSTEM_PROMPT,
- DALLE_REVERSE_PROXY,
- PROXY,
- DALLE2_AZURE_API_VERSION,
- DALLE2_BASEURL,
- DALLE2_API_KEY,
- DALLE_API_KEY,
-} = process.env;
class OpenAICreateImage extends Tool {
constructor(fields = {}) {
super();
this.userId = fields.userId;
this.fileStrategy = fields.fileStrategy;
+ if (fields.processFileURL) {
+ this.processFileURL = fields.processFileURL.bind(this);
+ }
let apiKey = fields.DALLE2_API_KEY ?? fields.DALLE_API_KEY ?? this.getApiKey();
const config = { apiKey };
- if (DALLE_REVERSE_PROXY) {
- config.baseURL = extractBaseURL(DALLE_REVERSE_PROXY);
+ if (process.env.DALLE_REVERSE_PROXY) {
+ config.baseURL = extractBaseURL(process.env.DALLE_REVERSE_PROXY);
}
- if (DALLE2_AZURE_API_VERSION && DALLE2_BASEURL) {
- config.baseURL = DALLE2_BASEURL;
- config.defaultQuery = { 'api-version': DALLE2_AZURE_API_VERSION };
- config.defaultHeaders = { 'api-key': DALLE2_API_KEY, 'Content-Type': 'application/json' };
- config.apiKey = DALLE2_API_KEY;
+ if (process.env.DALLE2_AZURE_API_VERSION && process.env.DALLE2_BASEURL) {
+ config.baseURL = process.env.DALLE2_BASEURL;
+ config.defaultQuery = { 'api-version': process.env.DALLE2_AZURE_API_VERSION };
+ config.defaultHeaders = {
+ 'api-key': process.env.DALLE2_API_KEY,
+ 'Content-Type': 'application/json',
+ };
+ config.apiKey = process.env.DALLE2_API_KEY;
}
- if (PROXY) {
- config.httpAgent = new HttpsProxyAgent(PROXY);
+ if (process.env.PROXY) {
+ config.httpAgent = new HttpsProxyAgent(process.env.PROXY);
}
this.openai = new OpenAI(config);
@@ -51,7 +48,7 @@ Guidelines:
"Subject: [subject], Style: [style], Color: [color], Details: [details], Emotion: [emotion]"
- Generate images only once per human query unless explicitly requested by the user`;
this.description_for_model =
- DALLE2_SYSTEM_PROMPT ??
+ process.env.DALLE2_SYSTEM_PROMPT ??
`// Whenever a description of an image is given, generate prompts (following these rules), and use dalle to create the image. If the user does not ask for a specific number of images, default to creating 2 prompts to send to dalle that are written to be as diverse as possible. All prompts sent to dalle must abide by the following policies:
// 1. Prompts must be in English. Translate to English if needed.
// 2. One image per function call. Create only 1 image per request unless explicitly told to generate more than 1 image.
@@ -67,7 +64,7 @@ Guidelines:
}
getApiKey() {
- const apiKey = DALLE2_API_KEY ?? DALLE_API_KEY ?? '';
+ const apiKey = process.env.DALLE2_API_KEY ?? process.env.DALLE_API_KEY ?? '';
if (!apiKey) {
throw new Error('Missing DALLE_API_KEY environment variable.');
}
@@ -86,13 +83,21 @@ Guidelines:
}
async _call(input) {
- const resp = await this.openai.images.generate({
- prompt: this.replaceUnwantedChars(input),
- // TODO: Future idea -- could we ask an LLM to extract these arguments from an input that might contain them?
- n: 1,
- // size: '1024x1024'
- size: '512x512',
- });
+ let resp;
+
+ try {
+ resp = await this.openai.images.generate({
+ prompt: this.replaceUnwantedChars(input),
+ // TODO: Future idea -- could we ask an LLM to extract these arguments from an input that might contain them?
+ n: 1,
+ // size: '1024x1024'
+ size: '512x512',
+ });
+ } catch (error) {
+ logger.error('[DALL-E] Problem generating the image:', error);
+ return `Something went wrong when trying to generate the image. The DALL-E API may be unavailable:
+Error Message: ${error.message}`;
+ }
const theImageUrl = resp.data[0].url;
@@ -116,15 +121,16 @@ Guidelines:
});
try {
- const result = await processFileURL({
+ const result = await this.processFileURL({
fileStrategy: this.fileStrategy,
userId: this.userId,
URL: theImageUrl,
fileName: imageName,
basePath: 'images',
+ context: FileContext.image_generation,
});
- this.result = this.wrapInMarkdown(result);
+ this.result = this.wrapInMarkdown(result.filepath);
} catch (error) {
logger.error('Error while saving the image:', error);
this.result = `Failed to save the image locally. ${error.message}`;
diff --git a/api/app/clients/tools/GoogleSearch.js b/api/app/clients/tools/GoogleSearch.js
deleted file mode 100644
index 3d7574b6c19..00000000000
--- a/api/app/clients/tools/GoogleSearch.js
+++ /dev/null
@@ -1,121 +0,0 @@
-const { google } = require('googleapis');
-const { Tool } = require('langchain/tools');
-const { logger } = require('~/config');
-
-/**
- * Represents a tool that allows an agent to use the Google Custom Search API.
- * @extends Tool
- */
-class GoogleSearchAPI extends Tool {
- constructor(fields = {}) {
- super();
- this.cx = fields.GOOGLE_CSE_ID || this.getCx();
- this.apiKey = fields.GOOGLE_API_KEY || this.getApiKey();
- this.customSearch = undefined;
- }
-
- /**
- * The name of the tool.
- * @type {string}
- */
- name = 'google';
-
- /**
- * A description for the agent to use
- * @type {string}
- */
- description =
- 'Use the \'google\' tool to retrieve internet search results relevant to your input. The results will return links and snippets of text from the webpages';
- description_for_model =
- 'Use the \'google\' tool to retrieve internet search results relevant to your input. The results will return links and snippets of text from the webpages';
-
- getCx() {
- const cx = process.env.GOOGLE_CSE_ID || '';
- if (!cx) {
- throw new Error('Missing GOOGLE_CSE_ID environment variable.');
- }
- return cx;
- }
-
- getApiKey() {
- const apiKey = process.env.GOOGLE_API_KEY || '';
- if (!apiKey) {
- throw new Error('Missing GOOGLE_API_KEY environment variable.');
- }
- return apiKey;
- }
-
- getCustomSearch() {
- if (!this.customSearch) {
- const version = 'v1';
- this.customSearch = google.customsearch(version);
- }
- return this.customSearch;
- }
-
- resultsToReadableFormat(results) {
- let output = 'Results:\n';
-
- results.forEach((resultObj, index) => {
- output += `Title: ${resultObj.title}\n`;
- output += `Link: ${resultObj.link}\n`;
- if (resultObj.snippet) {
- output += `Snippet: ${resultObj.snippet}\n`;
- }
-
- if (index < results.length - 1) {
- output += '\n';
- }
- });
-
- return output;
- }
-
- /**
- * Calls the tool with the provided input and returns a promise that resolves with a response from the Google Custom Search API.
- * @param {string} input - The input to provide to the API.
- * @returns {Promise} A promise that resolves with a response from the Google Custom Search API.
- */
- async _call(input) {
- try {
- const metadataResults = [];
- const response = await this.getCustomSearch().cse.list({
- q: input,
- cx: this.cx,
- auth: this.apiKey,
- num: 5, // Limit the number of results to 5
- });
-
- // return response.data;
- // logger.debug(response.data);
-
- if (!response.data.items || response.data.items.length === 0) {
- return this.resultsToReadableFormat([
- { title: 'No good Google Search Result was found', link: '' },
- ]);
- }
-
- // const results = response.items.slice(0, numResults);
- const results = response.data.items;
-
- for (const result of results) {
- const metadataResult = {
- title: result.title || '',
- link: result.link || '',
- };
- if (result.snippet) {
- metadataResult.snippet = result.snippet;
- }
- metadataResults.push(metadataResult);
- }
-
- return this.resultsToReadableFormat(metadataResults);
- } catch (error) {
- logger.error('[GoogleSearchAPI]', error);
- // throw error;
- return 'There was an error searching Google.';
- }
- }
-}
-
-module.exports = GoogleSearchAPI;
diff --git a/api/app/clients/tools/index.js b/api/app/clients/tools/index.js
index f5410e89eec..f16d229e6b7 100644
--- a/api/app/clients/tools/index.js
+++ b/api/app/clients/tools/index.js
@@ -1,35 +1,44 @@
-const GoogleSearchAPI = require('./GoogleSearch');
+const availableTools = require('./manifest.json');
+// Basic Tools
+const CodeBrew = require('./CodeBrew');
+const WolframAlphaAPI = require('./Wolfram');
+const AzureAiSearch = require('./AzureAiSearch');
const OpenAICreateImage = require('./DALL-E');
-const DALLE3 = require('./structured/DALLE3');
-const StructuredSD = require('./structured/StableDiffusion');
const StableDiffusionAPI = require('./StableDiffusion');
-const WolframAlphaAPI = require('./Wolfram');
-const StructuredWolfram = require('./structured/Wolfram');
const SelfReflectionTool = require('./SelfReflection');
-const AzureAiSearch = require('./AzureAiSearch');
-const StructuredACS = require('./structured/AzureAISearch');
+
+// Structured Tools
+const DALLE3 = require('./structured/DALLE3');
const ChatTool = require('./structured/ChatTool');
const E2BTools = require('./structured/E2BTools');
const CodeSherpa = require('./structured/CodeSherpa');
+const StructuredSD = require('./structured/StableDiffusion');
+const StructuredACS = require('./structured/AzureAISearch');
const CodeSherpaTools = require('./structured/CodeSherpaTools');
-const availableTools = require('./manifest.json');
-const CodeBrew = require('./CodeBrew');
+const GoogleSearchAPI = require('./structured/GoogleSearch');
+const StructuredWolfram = require('./structured/Wolfram');
+const TavilySearchResults = require('./structured/TavilySearchResults');
+const TraversaalSearch = require('./structured/TraversaalSearch');
module.exports = {
availableTools,
+ // Basic Tools
+ CodeBrew,
+ AzureAiSearch,
GoogleSearchAPI,
+ WolframAlphaAPI,
OpenAICreateImage,
- DALLE3,
StableDiffusionAPI,
- StructuredSD,
- WolframAlphaAPI,
- StructuredWolfram,
SelfReflectionTool,
- AzureAiSearch,
- StructuredACS,
- E2BTools,
+ // Structured Tools
+ DALLE3,
ChatTool,
+ E2BTools,
CodeSherpa,
+ StructuredSD,
+ StructuredACS,
CodeSherpaTools,
- CodeBrew,
+ StructuredWolfram,
+ TavilySearchResults,
+ TraversaalSearch,
};
diff --git a/api/app/clients/tools/manifest.json b/api/app/clients/tools/manifest.json
index bde4c8a87a9..3daaf9dd3bc 100644
--- a/api/app/clients/tools/manifest.json
+++ b/api/app/clients/tools/manifest.json
@@ -1,4 +1,17 @@
[
+ {
+ "name": "Traversaal",
+ "pluginKey": "traversaal_search",
+ "description": "Traversaal is a robust search API tailored for LLM Agents. Get an API key here: https://api.traversaal.ai",
+ "icon": "https://traversaal.ai/favicon.ico",
+ "authConfig": [
+ {
+ "authField": "TRAVERSAAL_API_KEY",
+ "label": "Traversaal API Key",
+ "description": "Get your API key here: https://api.traversaal.ai"
+ }
+ ]
+ },
{
"name": "Google",
"pluginKey": "google",
@@ -89,7 +102,7 @@
"icon": "https://i.imgur.com/u2TzXzH.png",
"authConfig": [
{
- "authField": "DALLE2_API_KEY",
+ "authField": "DALLE2_API_KEY||DALLE_API_KEY",
"label": "OpenAI API Key",
"description": "You can use DALL-E with your API Key from OpenAI."
}
@@ -102,12 +115,25 @@
"icon": "https://i.imgur.com/u2TzXzH.png",
"authConfig": [
{
- "authField": "DALLE3_API_KEY",
+ "authField": "DALLE3_API_KEY||DALLE_API_KEY",
"label": "OpenAI API Key",
"description": "You can use DALL-E with your API Key from OpenAI."
}
]
},
+ {
+ "name": "Tavily Search",
+ "pluginKey": "tavily_search_results_json",
+ "description": "Tavily Search is a robust search API tailored for LLM Agents. It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience.",
+ "icon": "https://tavily.com/favicon.ico",
+ "authConfig": [
+ {
+ "authField": "TAVILY_API_KEY",
+ "label": "Tavily API Key",
+ "description": "Get your API key here: https://app.tavily.com/"
+ }
+ ]
+ },
{
"name": "Calculator",
"pluginKey": "calculator",
diff --git a/api/app/clients/tools/structured/AzureAISearch.js b/api/app/clients/tools/structured/AzureAISearch.js
index 9b50aa2c433..0ce7b43fb21 100644
--- a/api/app/clients/tools/structured/AzureAISearch.js
+++ b/api/app/clients/tools/structured/AzureAISearch.js
@@ -19,6 +19,13 @@ class AzureAISearch extends StructuredTool {
this.name = 'azure-ai-search';
this.description =
'Use the \'azure-ai-search\' tool to retrieve search results relevant to your input';
+ /* Used to initialize the Tool without necessary variables. */
+ this.override = fields.override ?? false;
+
+ // Define schema
+ this.schema = z.object({
+ query: z.string().describe('Search word or phrase to Azure AI Search'),
+ });
// Initialize properties using helper function
this.serviceEndpoint = this._initializeField(
@@ -51,12 +58,16 @@ class AzureAISearch extends StructuredTool {
);
// Check for required fields
- if (!this.serviceEndpoint || !this.indexName || !this.apiKey) {
+ if (!this.override && (!this.serviceEndpoint || !this.indexName || !this.apiKey)) {
throw new Error(
'Missing AZURE_AI_SEARCH_SERVICE_ENDPOINT, AZURE_AI_SEARCH_INDEX_NAME, or AZURE_AI_SEARCH_API_KEY environment variable.',
);
}
+ if (this.override) {
+ return;
+ }
+
// Create SearchClient
this.client = new SearchClient(
this.serviceEndpoint,
@@ -64,11 +75,6 @@ class AzureAISearch extends StructuredTool {
new AzureKeyCredential(this.apiKey),
{ apiVersion: this.apiVersion },
);
-
- // Define schema
- this.schema = z.object({
- query: z.string().describe('Search word or phrase to Azure AI Search'),
- });
}
// Improved error handling and logging
diff --git a/api/app/clients/tools/structured/DALLE3.js b/api/app/clients/tools/structured/DALLE3.js
index 33df93e7fcf..3155992ca9b 100644
--- a/api/app/clients/tools/structured/DALLE3.js
+++ b/api/app/clients/tools/structured/DALLE3.js
@@ -4,42 +4,47 @@ const OpenAI = require('openai');
const { v4: uuidv4 } = require('uuid');
const { Tool } = require('langchain/tools');
const { HttpsProxyAgent } = require('https-proxy-agent');
+const { FileContext } = require('librechat-data-provider');
const { getImageBasename } = require('~/server/services/Files/images');
-const { processFileURL } = require('~/server/services/Files/process');
const extractBaseURL = require('~/utils/extractBaseURL');
const { logger } = require('~/config');
-const {
- DALLE3_SYSTEM_PROMPT,
- DALLE_REVERSE_PROXY,
- PROXY,
- DALLE3_AZURE_API_VERSION,
- DALLE3_BASEURL,
- DALLE3_API_KEY,
-} = process.env;
class DALLE3 extends Tool {
constructor(fields = {}) {
super();
+ /** @type {boolean} Used to initialize the Tool without necessary variables. */
+ this.override = fields.override ?? false;
+ /** @type {boolean} Necessary for output to contain all image metadata. */
+ this.returnMetadata = fields.returnMetadata ?? false;
this.userId = fields.userId;
this.fileStrategy = fields.fileStrategy;
+ if (fields.processFileURL) {
+ /** @type {processFileURL} Necessary for output to contain all image metadata. */
+ this.processFileURL = fields.processFileURL.bind(this);
+ }
+
let apiKey = fields.DALLE3_API_KEY ?? fields.DALLE_API_KEY ?? this.getApiKey();
const config = { apiKey };
- if (DALLE_REVERSE_PROXY) {
- config.baseURL = extractBaseURL(DALLE_REVERSE_PROXY);
+ if (process.env.DALLE_REVERSE_PROXY) {
+ config.baseURL = extractBaseURL(process.env.DALLE_REVERSE_PROXY);
}
- if (DALLE3_AZURE_API_VERSION && DALLE3_BASEURL) {
- config.baseURL = DALLE3_BASEURL;
- config.defaultQuery = { 'api-version': DALLE3_AZURE_API_VERSION };
- config.defaultHeaders = { 'api-key': DALLE3_API_KEY, 'Content-Type': 'application/json' };
- config.apiKey = DALLE3_API_KEY;
+ if (process.env.DALLE3_AZURE_API_VERSION && process.env.DALLE3_BASEURL) {
+ config.baseURL = process.env.DALLE3_BASEURL;
+ config.defaultQuery = { 'api-version': process.env.DALLE3_AZURE_API_VERSION };
+ config.defaultHeaders = {
+ 'api-key': process.env.DALLE3_API_KEY,
+ 'Content-Type': 'application/json',
+ };
+ config.apiKey = process.env.DALLE3_API_KEY;
}
- if (PROXY) {
- config.httpAgent = new HttpsProxyAgent(PROXY);
+ if (process.env.PROXY) {
+ config.httpAgent = new HttpsProxyAgent(process.env.PROXY);
}
+ /** @type {OpenAI} */
this.openai = new OpenAI(config);
this.name = 'dalle';
this.description = `Use DALLE to create images from text descriptions.
@@ -47,7 +52,7 @@ class DALLE3 extends Tool {
- Create only one image, without repeating or listing descriptions outside the "prompts" field.
- Maintains the original intent of the description, with parameters for image style, quality, and size to tailor the output.`;
this.description_for_model =
- DALLE3_SYSTEM_PROMPT ??
+ process.env.DALLE3_SYSTEM_PROMPT ??
`// Whenever a description of an image is given, generate prompts (following these rules), and use dalle to create the image. If the user does not ask for a specific number of images, default to creating 2 prompts to send to dalle that are written to be as diverse as possible. All prompts sent to dalle must abide by the following policies:
// 1. Prompts must be in English. Translate to English if needed.
// 2. One image per function call. Create only 1 image per request unless explicitly told to generate more than 1 image.
@@ -86,7 +91,7 @@ class DALLE3 extends Tool {
getApiKey() {
const apiKey = process.env.DALLE3_API_KEY ?? process.env.DALLE_API_KEY ?? '';
- if (!apiKey) {
+ if (!apiKey && !this.override) {
throw new Error('Missing DALLE_API_KEY environment variable.');
}
return apiKey;
@@ -120,6 +125,7 @@ class DALLE3 extends Tool {
n: 1,
});
} catch (error) {
+ logger.error('[DALL-E-3] Problem generating the image:', error);
return `Something went wrong when trying to generate the image. The DALL-E API may be unavailable:
Error Message: ${error.message}`;
}
@@ -150,15 +156,20 @@ Error Message: ${error.message}`;
});
try {
- const result = await processFileURL({
+ const result = await this.processFileURL({
fileStrategy: this.fileStrategy,
userId: this.userId,
URL: theImageUrl,
fileName: imageName,
basePath: 'images',
+ context: FileContext.image_generation,
});
- this.result = this.wrapInMarkdown(result);
+ if (this.returnMetadata) {
+ this.result = result;
+ } else {
+ this.result = this.wrapInMarkdown(result.filepath);
+ }
} catch (error) {
logger.error('Error while saving the image:', error);
this.result = `Failed to save the image locally. ${error.message}`;
diff --git a/api/app/clients/tools/structured/GoogleSearch.js b/api/app/clients/tools/structured/GoogleSearch.js
new file mode 100644
index 00000000000..92d33272c83
--- /dev/null
+++ b/api/app/clients/tools/structured/GoogleSearch.js
@@ -0,0 +1,65 @@
+const { z } = require('zod');
+const { Tool } = require('@langchain/core/tools');
+const { getEnvironmentVariable } = require('@langchain/core/utils/env');
+
+class GoogleSearchResults extends Tool {
+ static lc_name() {
+ return 'GoogleSearchResults';
+ }
+
+ constructor(fields = {}) {
+ super(fields);
+ this.envVarApiKey = 'GOOGLE_API_KEY';
+ this.envVarSearchEngineId = 'GOOGLE_CSE_ID';
+ this.override = fields.override ?? false;
+ this.apiKey = fields.apiKey ?? getEnvironmentVariable(this.envVarApiKey);
+ this.searchEngineId =
+ fields.searchEngineId ?? getEnvironmentVariable(this.envVarSearchEngineId);
+
+ this.kwargs = fields?.kwargs ?? {};
+ this.name = 'google';
+ this.description =
+ 'A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events.';
+
+ this.schema = z.object({
+ query: z.string().min(1).describe('The search query string.'),
+ max_results: z
+ .number()
+ .min(1)
+ .max(10)
+ .optional()
+ .describe('The maximum number of search results to return. Defaults to 5.'),
+ // Note: Google API has its own parameters for search customization, adjust as needed.
+ });
+ }
+
+ async _call(input) {
+ const validationResult = this.schema.safeParse(input);
+ if (!validationResult.success) {
+ throw new Error(`Validation failed: ${JSON.stringify(validationResult.error.issues)}`);
+ }
+
+ const { query, max_results = 5 } = validationResult.data;
+
+ const response = await fetch(
+ `https://www.googleapis.com/customsearch/v1?key=${this.apiKey}&cx=${
+ this.searchEngineId
+ }&q=${encodeURIComponent(query)}&num=${max_results}`,
+ {
+ method: 'GET',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ },
+ );
+
+ const json = await response.json();
+ if (!response.ok) {
+ throw new Error(`Request failed with status ${response.status}: ${json.error.message}`);
+ }
+
+ return JSON.stringify(json);
+ }
+}
+
+module.exports = GoogleSearchResults;
diff --git a/api/app/clients/tools/structured/StableDiffusion.js b/api/app/clients/tools/structured/StableDiffusion.js
index 1fc5096730e..e891cbb398a 100644
--- a/api/app/clients/tools/structured/StableDiffusion.js
+++ b/api/app/clients/tools/structured/StableDiffusion.js
@@ -4,12 +4,28 @@ const { z } = require('zod');
const path = require('path');
const axios = require('axios');
const sharp = require('sharp');
+const { v4: uuidv4 } = require('uuid');
const { StructuredTool } = require('langchain/tools');
+const { FileContext } = require('librechat-data-provider');
+const paths = require('~/config/paths');
const { logger } = require('~/config');
class StableDiffusionAPI extends StructuredTool {
constructor(fields) {
super();
+ /** @type {string} User ID */
+ this.userId = fields.userId;
+ /** @type {Express.Request | undefined} Express Request object, only provided by ToolService */
+ this.req = fields.req;
+ /** @type {boolean} Used to initialize the Tool without necessary variables. */
+ this.override = fields.override ?? false;
+ /** @type {boolean} Necessary for output to contain all image metadata. */
+ this.returnMetadata = fields.returnMetadata ?? false;
+ if (fields.uploadImageBuffer) {
+ /** @type {uploadImageBuffer} Necessary for output to contain all image metadata. */
+ this.uploadImageBuffer = fields.uploadImageBuffer.bind(this);
+ }
+
this.name = 'stable-diffusion';
this.url = fields.SD_WEBUI_URL || this.getServerURL();
this.description_for_model = `// Generate images and visuals using text.
@@ -44,7 +60,7 @@ class StableDiffusionAPI extends StructuredTool {
getMarkdownImageUrl(imageName) {
const imageUrl = path
- .join(this.relativeImageUrl, imageName)
+ .join(this.relativePath, this.userId, imageName)
.replace(/\\/g, '/')
.replace('public/', '');
return ``;
@@ -52,7 +68,7 @@ class StableDiffusionAPI extends StructuredTool {
getServerURL() {
const url = process.env.SD_WEBUI_URL || '';
- if (!url) {
+ if (!url && !this.override) {
throw new Error('Missing SD_WEBUI_URL environment variable.');
}
return url;
@@ -70,46 +86,67 @@ class StableDiffusionAPI extends StructuredTool {
width: 1024,
height: 1024,
};
- const response = await axios.post(`${url}/sdapi/v1/txt2img`, payload);
- const image = response.data.images[0];
- const pngPayload = { image: `data:image/png;base64,${image}` };
- const response2 = await axios.post(`${url}/sdapi/v1/png-info`, pngPayload);
- const info = response2.data.info;
+ const generationResponse = await axios.post(`${url}/sdapi/v1/txt2img`, payload);
+ const image = generationResponse.data.images[0];
+
+ /** @type {{ height: number, width: number, seed: number, infotexts: string[] }} */
+ let info = {};
+ try {
+ info = JSON.parse(generationResponse.data.info);
+ } catch (error) {
+ logger.error('[StableDiffusion] Error while getting image metadata:', error);
+ }
- // Generate unique name
- const imageName = `${Date.now()}.png`;
- this.outputPath = path.resolve(
- __dirname,
- '..',
- '..',
- '..',
- '..',
- '..',
- 'client',
- 'public',
- 'images',
- );
- const appRoot = path.resolve(__dirname, '..', '..', '..', '..', '..', 'client');
- this.relativeImageUrl = path.relative(appRoot, this.outputPath);
+ const file_id = uuidv4();
+ const imageName = `${file_id}.png`;
+ const { imageOutput: imageOutputPath, clientPath } = paths;
+ const filepath = path.join(imageOutputPath, this.userId, imageName);
+ this.relativePath = path.relative(clientPath, imageOutputPath);
- // Check if directory exists, if not create it
- if (!fs.existsSync(this.outputPath)) {
- fs.mkdirSync(this.outputPath, { recursive: true });
+ if (!fs.existsSync(path.join(imageOutputPath, this.userId))) {
+ fs.mkdirSync(path.join(imageOutputPath, this.userId), { recursive: true });
}
try {
const buffer = Buffer.from(image.split(',', 1)[0], 'base64');
+ if (this.returnMetadata && this.uploadImageBuffer && this.req) {
+ const file = await this.uploadImageBuffer({
+ req: this.req,
+ context: FileContext.image_generation,
+ resize: false,
+ metadata: {
+ buffer,
+ height: info.height,
+ width: info.width,
+ bytes: Buffer.byteLength(buffer),
+ filename: imageName,
+ type: 'image/png',
+ file_id,
+ },
+ });
+
+ const generationInfo = info.infotexts?.[0]?.split('\n').pop() ?? '';
+ return {
+ ...file,
+ prompt,
+ metadata: {
+ negative_prompt,
+ seed: info.seed,
+ info: generationInfo,
+ },
+ };
+ }
+
await sharp(buffer)
.withMetadata({
iptcpng: {
- parameters: info,
+ parameters: info.infotexts?.[0],
},
})
- .toFile(this.outputPath + '/' + imageName);
+ .toFile(filepath);
this.result = this.getMarkdownImageUrl(imageName);
} catch (error) {
logger.error('[StableDiffusion] Error while saving the image:', error);
- // this.result = theImageUrl;
}
return this.result;
diff --git a/api/app/clients/tools/structured/TavilySearchResults.js b/api/app/clients/tools/structured/TavilySearchResults.js
new file mode 100644
index 00000000000..3945ac1d00f
--- /dev/null
+++ b/api/app/clients/tools/structured/TavilySearchResults.js
@@ -0,0 +1,92 @@
+const { z } = require('zod');
+const { Tool } = require('@langchain/core/tools');
+const { getEnvironmentVariable } = require('@langchain/core/utils/env');
+
+class TavilySearchResults extends Tool {
+ static lc_name() {
+ return 'TavilySearchResults';
+ }
+
+ constructor(fields = {}) {
+ super(fields);
+ this.envVar = 'TAVILY_API_KEY';
+ /* Used to initialize the Tool without necessary variables. */
+ this.override = fields.override ?? false;
+ this.apiKey = fields.apiKey ?? this.getApiKey();
+
+ this.kwargs = fields?.kwargs ?? {};
+ this.name = 'tavily_search_results_json';
+ this.description =
+ 'A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events.';
+
+ this.schema = z.object({
+ query: z.string().min(1).describe('The search query string.'),
+ max_results: z
+ .number()
+ .min(1)
+ .max(10)
+ .optional()
+ .describe('The maximum number of search results to return. Defaults to 5.'),
+ search_depth: z
+ .enum(['basic', 'advanced'])
+ .optional()
+ .describe(
+ 'The depth of the search, affecting result quality and response time (`basic` or `advanced`). Default is basic for quick results and advanced for indepth high quality results but longer response time. Advanced calls equals 2 requests.',
+ ),
+ include_images: z
+ .boolean()
+ .optional()
+ .describe(
+ 'Whether to include a list of query-related images in the response. Default is False.',
+ ),
+ include_answer: z
+ .boolean()
+ .optional()
+ .describe('Whether to include answers in the search results. Default is False.'),
+ // include_raw_content: z.boolean().optional().describe('Whether to include raw content in the search results. Default is False.'),
+ // include_domains: z.array(z.string()).optional().describe('A list of domains to specifically include in the search results.'),
+ // exclude_domains: z.array(z.string()).optional().describe('A list of domains to specifically exclude from the search results.'),
+ });
+ }
+
+ getApiKey() {
+ const apiKey = getEnvironmentVariable(this.envVar);
+ if (!apiKey && !this.override) {
+ throw new Error(`Missing ${this.envVar} environment variable.`);
+ }
+ return apiKey;
+ }
+
+ async _call(input) {
+ const validationResult = this.schema.safeParse(input);
+ if (!validationResult.success) {
+ throw new Error(`Validation failed: ${JSON.stringify(validationResult.error.issues)}`);
+ }
+
+ const { query, ...rest } = validationResult.data;
+
+ const requestBody = {
+ api_key: this.apiKey,
+ query,
+ ...rest,
+ ...this.kwargs,
+ };
+
+ const response = await fetch('https://api.tavily.com/search', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify(requestBody),
+ });
+
+ const json = await response.json();
+ if (!response.ok) {
+ throw new Error(`Request failed with status ${response.status}: ${json.error}`);
+ }
+
+ return JSON.stringify(json);
+ }
+}
+
+module.exports = TavilySearchResults;
diff --git a/api/app/clients/tools/structured/TraversaalSearch.js b/api/app/clients/tools/structured/TraversaalSearch.js
new file mode 100644
index 00000000000..e8ceeda134f
--- /dev/null
+++ b/api/app/clients/tools/structured/TraversaalSearch.js
@@ -0,0 +1,89 @@
+const { z } = require('zod');
+const { Tool } = require('@langchain/core/tools');
+const { getEnvironmentVariable } = require('@langchain/core/utils/env');
+const { logger } = require('~/config');
+
+/**
+ * Tool for the Traversaal AI search API, Ares.
+ */
+class TraversaalSearch extends Tool {
+ static lc_name() {
+ return 'TraversaalSearch';
+ }
+ constructor(fields) {
+ super(fields);
+ this.override = fields.override ?? false; this.name = 'traversaal_search';
+ this.description = `An AI search engine optimized for comprehensive, accurate, and trusted results.
+ Useful for when you need to answer questions about current events. Input should be a search query.`;
+ this.description_for_model =
+ '\'Please create a specific sentence for the AI to understand and use as a query to search the web based on the user\'s request. For example, "Find information about the highest mountains in the world." or "Show me the latest news articles about climate change and its impact on polar ice caps."\'';
+ this.schema = z.object({
+ query: z
+ .string()
+ .describe(
+ 'A properly written sentence to be interpreted by an AI to search the web according to the user\'s request.',
+ ),
+ });
+
+ this.apiKey = fields?.TRAVERSAAL_API_KEY ?? this.getApiKey();
+ }
+
+ getApiKey() {
+ const apiKey = getEnvironmentVariable('TRAVERSAAL_API_KEY');
+ if (!apiKey && !this.override) {
+ throw new Error(
+ 'No Traversaal API key found. Either set an environment variable named "TRAVERSAAL_API_KEY" or pass an API key as "apiKey".',
+ );
+ }
+ return apiKey;
+ }
+
+ // eslint-disable-next-line no-unused-vars
+ async _call({ query }, _runManager) {
+ const body = {
+ query: [query],
+ };
+ try {
+ const response = await fetch('https://api-ares.traversaal.ai/live/predict', {
+ method: 'POST',
+ headers: {
+ 'content-type': 'application/json',
+ 'x-api-key': this.apiKey,
+ },
+ body: JSON.stringify({ ...body }),
+ });
+ const json = await response.json();
+ if (!response.ok) {
+ throw new Error(
+ `Request failed with status code ${response.status}: ${json.error ?? json.message}`,
+ );
+ }
+ if (!json.data) {
+ throw new Error('Could not parse Traversaal API results. Please try again.');
+ }
+
+ const baseText = json.data?.response_text ?? '';
+ const sources = json.data?.web_url;
+ const noResponse = 'No response found in Traversaal API results';
+
+ if (!baseText && !sources) {
+ return noResponse;
+ }
+
+ const sourcesText = sources?.length ? '\n\nSources:\n - ' + sources.join('\n - ') : '';
+
+ const result = baseText + sourcesText;
+
+ if (!result) {
+ return noResponse;
+ }
+
+ return result;
+ } catch (error) {
+ logger.error('Traversaal API request failed', error);
+ return `Traversaal API request failed: ${error.message}`;
+ }
+ }
+}
+
+module.exports = TraversaalSearch;
diff --git a/api/app/clients/tools/structured/Wolfram.js b/api/app/clients/tools/structured/Wolfram.js
index 2c5c6e023a1..fc857b35cb2 100644
--- a/api/app/clients/tools/structured/Wolfram.js
+++ b/api/app/clients/tools/structured/Wolfram.js
@@ -7,6 +7,9 @@ const { logger } = require('~/config');
class WolframAlphaAPI extends StructuredTool {
constructor(fields) {
super();
+ /* Used to initialize the Tool without necessary variables. */
+ this.override = fields.override ?? false;
+
this.name = 'wolfram';
this.apiKey = fields.WOLFRAM_APP_ID || this.getAppId();
this.description_for_model = `// Access dynamic computation and curated data from WolframAlpha and Wolfram Cloud.
@@ -55,7 +58,7 @@ class WolframAlphaAPI extends StructuredTool {
getAppId() {
const appId = process.env.WOLFRAM_APP_ID || '';
- if (!appId) {
+ if (!appId && !this.override) {
throw new Error('Missing WOLFRAM_APP_ID environment variable.');
}
return appId;
diff --git a/api/app/clients/tools/structured/specs/DALLE3.spec.js b/api/app/clients/tools/structured/specs/DALLE3.spec.js
index 58771b1459e..1b28de2faf1 100644
--- a/api/app/clients/tools/structured/specs/DALLE3.spec.js
+++ b/api/app/clients/tools/structured/specs/DALLE3.spec.js
@@ -1,14 +1,11 @@
const OpenAI = require('openai');
const DALLE3 = require('../DALLE3');
-const { processFileURL } = require('~/server/services/Files/process');
const { logger } = require('~/config');
jest.mock('openai');
-jest.mock('~/server/services/Files/process', () => ({
- processFileURL: jest.fn(),
-}));
+const processFileURL = jest.fn();
jest.mock('~/server/services/Files/images', () => ({
getImageBasename: jest.fn().mockImplementation((url) => {
@@ -69,7 +66,7 @@ describe('DALLE3', () => {
jest.resetModules();
process.env = { ...originalEnv, DALLE_API_KEY: mockApiKey };
// Instantiate DALLE3 for tests that do not depend on DALLE3_SYSTEM_PROMPT
- dalle = new DALLE3();
+ dalle = new DALLE3({ processFileURL });
});
afterEach(() => {
@@ -78,7 +75,8 @@ describe('DALLE3', () => {
process.env = originalEnv;
});
- it('should throw an error if DALLE_API_KEY is missing', () => {
+ it('should throw an error if all potential API keys are missing', () => {
+ delete process.env.DALLE3_API_KEY;
delete process.env.DALLE_API_KEY;
expect(() => new DALLE3()).toThrow('Missing DALLE_API_KEY environment variable.');
});
@@ -112,7 +110,9 @@ describe('DALLE3', () => {
};
generate.mockResolvedValue(mockResponse);
- processFileURL.mockResolvedValue('http://example.com/img-test.png');
+ processFileURL.mockResolvedValue({
+ filepath: 'http://example.com/img-test.png',
+ });
const result = await dalle._call(mockData);
diff --git a/api/app/clients/tools/util/handleTools.js b/api/app/clients/tools/util/handleTools.js
index 1d9a3a00749..7ed18658711 100644
--- a/api/app/clients/tools/util/handleTools.js
+++ b/api/app/clients/tools/util/handleTools.js
@@ -6,19 +6,23 @@ const { OpenAIEmbeddings } = require('langchain/embeddings/openai');
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const {
availableTools,
+ // Basic Tools
+ CodeBrew,
+ AzureAISearch,
GoogleSearchAPI,
WolframAlphaAPI,
- StructuredWolfram,
OpenAICreateImage,
StableDiffusionAPI,
+ // Structured Tools
DALLE3,
- StructuredSD,
- AzureAISearch,
- StructuredACS,
E2BTools,
CodeSherpa,
+ StructuredSD,
+ StructuredACS,
CodeSherpaTools,
- CodeBrew,
+ TraversaalSearch,
+ StructuredWolfram,
+ TavilySearchResults,
} = require('../');
const { loadToolSuite } = require('./loadToolSuite');
const { loadSpecs } = require('./loadSpecs');
@@ -30,6 +34,14 @@ const getOpenAIKey = async (options, user) => {
return openAIApiKey || (await getUserPluginAuthValue(user, 'OPENAI_API_KEY'));
};
+/**
+ * Validates the availability and authentication of tools for a user based on environment variables or user-specific plugin authentication values.
+ * Tools without required authentication or with valid authentication are considered valid.
+ *
+ * @param {Object} user The user object for whom to validate tool access.
+ * @param {Array} tools An array of tool identifiers to validate. Defaults to an empty array.
+ * @returns {Promise>} A promise that resolves to an array of valid tool identifiers.
+ */
const validateTools = async (user, tools = []) => {
try {
const validToolsSet = new Set(tools);
@@ -37,16 +49,34 @@ const validateTools = async (user, tools = []) => {
validToolsSet.has(tool.pluginKey),
);
+ /**
+ * Validates the credentials for a given auth field or set of alternate auth fields for a tool.
+ * If valid admin or user authentication is found, the function returns early. Otherwise, it removes the tool from the set of valid tools.
+ *
+ * @param {string} authField The authentication field or fields (separated by "||" for alternates) to validate.
+ * @param {string} toolName The identifier of the tool being validated.
+ */
const validateCredentials = async (authField, toolName) => {
- const adminAuth = process.env[authField];
- if (adminAuth && adminAuth.length > 0) {
- return;
+ const fields = authField.split('||');
+ for (const field of fields) {
+ const adminAuth = process.env[field];
+ if (adminAuth && adminAuth.length > 0) {
+ return;
+ }
+
+ let userAuth = null;
+ try {
+ userAuth = await getUserPluginAuthValue(user, field);
+ } catch (err) {
+ if (field === fields[fields.length - 1] && !userAuth) {
+ throw err;
+ }
+ }
+ if (userAuth && userAuth.length > 0) {
+ return;
+ }
}
- const userAuth = await getUserPluginAuthValue(user, authField);
- if (userAuth && userAuth.length > 0) {
- return;
- }
validToolsSet.delete(toolName);
};
@@ -63,20 +93,55 @@ const validateTools = async (user, tools = []) => {
return Array.from(validToolsSet.values());
} catch (err) {
logger.error('[validateTools] There was a problem validating tools', err);
- throw new Error(err);
+ throw new Error('There was a problem validating tools');
}
};
-const loadToolWithAuth = async (userId, authFields, ToolConstructor, options = {}) => {
+/**
+ * Initializes a tool with authentication values for the given user, supporting alternate authentication fields.
+ * Authentication fields can have alternates separated by "||", and the first defined variable will be used.
+ *
+ * @param {string} userId The user ID for which the tool is being loaded.
+ * @param {Array} authFields Array of strings representing the authentication fields. Supports alternate fields delimited by "||".
+ * @param {typeof import('langchain/tools').Tool} ToolConstructor The constructor function for the tool to be initialized.
+ * @param {Object} options Optional parameters to be passed to the tool constructor alongside authentication values.
+ * @returns {Function} An async function that, when called, initializes and returns an instance of the tool with authentication values applied.
+ */
+const loadToolWithAuth = (userId, authFields, ToolConstructor, options = {}) => {
return async function () {
let authValues = {};
- for (const authField of authFields) {
- let authValue = process.env[authField];
- if (!authValue) {
- authValue = await getUserPluginAuthValue(userId, authField);
+ /**
+ * Finds the first non-empty value for the given authentication field, supporting alternate fields.
+ * @param {string[]} fields Array of strings representing the authentication fields. Supports alternate fields delimited by "||".
+ * @returns {Promise<{ authField: string, authValue: string} | null>} An object containing the authentication field and value, or null if not found.
+ */
+ const findAuthValue = async (fields) => {
+ for (const field of fields) {
+ let value = process.env[field];
+ if (value) {
+ return { authField: field, authValue: value };
+ }
+ try {
+ value = await getUserPluginAuthValue(userId, field);
+ } catch (err) {
+ if (field === fields[fields.length - 1] && !value) {
+ throw err;
+ }
+ }
+ if (value) {
+ return { authField: field, authValue: value };
+ }
+ }
+ return null;
+ };
+
+ for (let authField of authFields) {
+ const fields = authField.split('||');
+ const result = await findAuthValue(fields);
+ if (result) {
+ authValues[result.authField] = result.authValue;
}
- authValues[authField] = authValue;
}
return new ToolConstructor({ ...options, ...authValues, userId });
@@ -90,8 +155,10 @@ const loadTools = async ({
returnMap = false,
tools = [],
options = {},
+ skipSpecs = false,
}) => {
const toolConstructors = {
+ tavily_search_results_json: TavilySearchResults,
calculator: Calculator,
google: GoogleSearchAPI,
wolfram: functions ? StructuredWolfram : WolframAlphaAPI,
@@ -99,6 +166,7 @@ const loadTools = async ({
'stable-diffusion': functions ? StructuredSD : StableDiffusionAPI,
'azure-ai-search': functions ? StructuredACS : AzureAISearch,
CodeBrew: CodeBrew,
+ traversaal_search: TraversaalSearch,
};
const openAIApiKey = await getOpenAIKey(options, user);
@@ -168,10 +236,19 @@ const loadTools = async ({
toolConstructors.codesherpa = CodeSherpa;
}
+ const imageGenOptions = {
+ req: options.req,
+ fileStrategy: options.fileStrategy,
+ processFileURL: options.processFileURL,
+ returnMetadata: options.returnMetadata,
+ uploadImageBuffer: options.uploadImageBuffer,
+ };
+
const toolOptions = {
serpapi: { location: 'Austin,Texas,United States', hl: 'en', gl: 'us' },
- dalle: { fileStrategy: options.fileStrategy },
- 'dall-e': { fileStrategy: options.fileStrategy },
+ dalle: imageGenOptions,
+ 'dall-e': imageGenOptions,
+ 'stable-diffusion': imageGenOptions,
};
const toolAuthFields = {};
@@ -194,7 +271,7 @@ const loadTools = async ({
if (toolConstructors[tool]) {
const options = toolOptions[tool] || {};
- const toolInstance = await loadToolWithAuth(
+ const toolInstance = loadToolWithAuth(
user,
toolAuthFields[tool],
toolConstructors[tool],
@@ -210,7 +287,7 @@ const loadTools = async ({
}
let specs = null;
- if (functions && remainingTools.length > 0) {
+ if (functions && remainingTools.length > 0 && skipSpecs !== true) {
specs = await loadSpecs({
llm: model,
user,
@@ -237,6 +314,9 @@ const loadTools = async ({
let result = [];
for (const tool of tools) {
const validTool = requestedTools[tool];
+ if (!validTool) {
+ continue;
+ }
const plugin = await validTool();
if (Array.isArray(plugin)) {
@@ -250,6 +330,7 @@ const loadTools = async ({
};
module.exports = {
+ loadToolWithAuth,
validateTools,
loadTools,
};
diff --git a/api/app/clients/tools/util/handleTools.test.js b/api/app/clients/tools/util/handleTools.test.js
index 40d8bc6129e..2c977714275 100644
--- a/api/app/clients/tools/util/handleTools.test.js
+++ b/api/app/clients/tools/util/handleTools.test.js
@@ -4,26 +4,33 @@ const mockUser = {
findByIdAndDelete: jest.fn(),
};
-var mockPluginService = {
+const mockPluginService = {
updateUserPluginAuth: jest.fn(),
deleteUserPluginAuth: jest.fn(),
getUserPluginAuthValue: jest.fn(),
};
-jest.mock('../../../../models/User', () => {
+jest.mock('~/models/User', () => {
return function () {
return mockUser;
};
});
-jest.mock('../../../../server/services/PluginService', () => mockPluginService);
+jest.mock('~/server/services/PluginService', () => mockPluginService);
-const User = require('../../../../models/User');
-const { validateTools, loadTools } = require('./');
-const PluginService = require('../../../../server/services/PluginService');
-const { BaseChatModel } = require('langchain/chat_models/openai');
const { Calculator } = require('langchain/tools/calculator');
-const { availableTools, OpenAICreateImage, GoogleSearchAPI, StructuredSD } = require('../');
+const { BaseChatModel } = require('langchain/chat_models/openai');
+
+const User = require('~/models/User');
+const PluginService = require('~/server/services/PluginService');
+const { validateTools, loadTools, loadToolWithAuth } = require('./handleTools');
+const {
+ availableTools,
+ OpenAICreateImage,
+ GoogleSearchAPI,
+ StructuredSD,
+ WolframAlphaAPI,
+} = require('../');
describe('Tool Handlers', () => {
let fakeUser;
@@ -44,7 +51,10 @@ describe('Tool Handlers', () => {
});
mockPluginService.updateUserPluginAuth.mockImplementation(
(userId, authField, _pluginKey, credential) => {
- userAuthValues[`${userId}-${authField}`] = credential;
+ const fields = authField.split('||');
+ fields.forEach((field) => {
+ userAuthValues[`${userId}-${field}`] = credential;
+ });
},
);
@@ -53,6 +63,7 @@ describe('Tool Handlers', () => {
username: 'fakeuser',
email: 'fakeuser@example.com',
emailVerified: false,
+ // file deepcode ignore NoHardcodedPasswords/test: fake value
password: 'fakepassword123',
avatar: '',
provider: 'local',
@@ -133,6 +144,18 @@ describe('Tool Handlers', () => {
loadTool2 = toolFunctions[sampleTools[1]];
loadTool3 = toolFunctions[sampleTools[2]];
});
+
+ let originalEnv;
+
+ beforeEach(() => {
+ originalEnv = process.env;
+ process.env = { ...originalEnv };
+ });
+
+ afterEach(() => {
+ process.env = originalEnv;
+ });
+
it('returns the expected load functions for requested tools', async () => {
expect(loadTool1).toBeDefined();
expect(loadTool2).toBeDefined();
@@ -149,6 +172,86 @@ describe('Tool Handlers', () => {
expect(authTool).toBeInstanceOf(ToolClass);
expect(tool).toBeInstanceOf(ToolClass2);
});
+
+ it('should initialize an authenticated tool with primary auth field', async () => {
+ process.env.DALLE2_API_KEY = 'mocked_api_key';
+ const initToolFunction = loadToolWithAuth(
+ 'userId',
+ ['DALLE2_API_KEY||DALLE_API_KEY'],
+ ToolClass,
+ );
+ const authTool = await initToolFunction();
+
+ expect(authTool).toBeInstanceOf(ToolClass);
+ expect(mockPluginService.getUserPluginAuthValue).not.toHaveBeenCalled();
+ });
+
+ it('should initialize an authenticated tool with alternate auth field when primary is missing', async () => {
+ delete process.env.DALLE2_API_KEY; // Ensure the primary key is not set
+ process.env.DALLE_API_KEY = 'mocked_alternate_api_key';
+ const initToolFunction = loadToolWithAuth(
+ 'userId',
+ ['DALLE2_API_KEY||DALLE_API_KEY'],
+ ToolClass,
+ );
+ const authTool = await initToolFunction();
+
+ expect(authTool).toBeInstanceOf(ToolClass);
+ expect(mockPluginService.getUserPluginAuthValue).toHaveBeenCalledTimes(1);
+ expect(mockPluginService.getUserPluginAuthValue).toHaveBeenCalledWith(
+ 'userId',
+ 'DALLE2_API_KEY',
+ );
+ });
+
+ it('should fallback to getUserPluginAuthValue when env vars are missing', async () => {
+ mockPluginService.updateUserPluginAuth('userId', 'DALLE_API_KEY', 'dalle', 'mocked_api_key');
+ const initToolFunction = loadToolWithAuth(
+ 'userId',
+ ['DALLE2_API_KEY||DALLE_API_KEY'],
+ ToolClass,
+ );
+ const authTool = await initToolFunction();
+
+ expect(authTool).toBeInstanceOf(ToolClass);
+ expect(mockPluginService.getUserPluginAuthValue).toHaveBeenCalledTimes(2);
+ });
+
+ it('should initialize an authenticated tool with singular auth field', async () => {
+ process.env.WOLFRAM_APP_ID = 'mocked_app_id';
+ const initToolFunction = loadToolWithAuth('userId', ['WOLFRAM_APP_ID'], WolframAlphaAPI);
+ const authTool = await initToolFunction();
+
+ expect(authTool).toBeInstanceOf(WolframAlphaAPI);
+ expect(mockPluginService.getUserPluginAuthValue).not.toHaveBeenCalled();
+ });
+
+ it('should initialize an authenticated tool when env var is set', async () => {
+ process.env.WOLFRAM_APP_ID = 'mocked_app_id';
+ const initToolFunction = loadToolWithAuth('userId', ['WOLFRAM_APP_ID'], WolframAlphaAPI);
+ const authTool = await initToolFunction();
+
+ expect(authTool).toBeInstanceOf(WolframAlphaAPI);
+ expect(mockPluginService.getUserPluginAuthValue).not.toHaveBeenCalledWith(
+ 'userId',
+ 'WOLFRAM_APP_ID',
+ );
+ });
+
+ it('should fallback to getUserPluginAuthValue when singular env var is missing', async () => {
+ delete process.env.WOLFRAM_APP_ID; // Ensure the environment variable is not set
+ mockPluginService.getUserPluginAuthValue.mockResolvedValue('mocked_user_auth_value');
+ const initToolFunction = loadToolWithAuth('userId', ['WOLFRAM_APP_ID'], WolframAlphaAPI);
+ const authTool = await initToolFunction();
+
+ expect(authTool).toBeInstanceOf(WolframAlphaAPI);
+ expect(mockPluginService.getUserPluginAuthValue).toHaveBeenCalledTimes(1);
+ expect(mockPluginService.getUserPluginAuthValue).toHaveBeenCalledWith(
+ 'userId',
+ 'WOLFRAM_APP_ID',
+ );
+ });
+
it('should throw an error for an unauthenticated tool', async () => {
try {
await loadTool2();
diff --git a/api/app/clients/tools/util/loadToolSuite.js b/api/app/clients/tools/util/loadToolSuite.js
index 2b4500a4f77..4392d61b9a6 100644
--- a/api/app/clients/tools/util/loadToolSuite.js
+++ b/api/app/clients/tools/util/loadToolSuite.js
@@ -1,17 +1,49 @@
-const { getUserPluginAuthValue } = require('../../../../server/services/PluginService');
+const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const { availableTools } = require('../');
+const { logger } = require('~/config');
-const loadToolSuite = async ({ pluginKey, tools, user, options }) => {
+/**
+ * Loads a suite of tools with authentication values for a given user, supporting alternate authentication fields.
+ * Authentication fields can have alternates separated by "||", and the first defined variable will be used.
+ *
+ * @param {Object} params Parameters for loading the tool suite.
+ * @param {string} params.pluginKey Key identifying the plugin whose tools are to be loaded.
+ * @param {Array} params.tools Array of tool constructor functions.
+ * @param {Object} params.user User object for whom the tools are being loaded.
+ * @param {Object} [params.options={}] Optional parameters to be passed to each tool constructor.
+ * @returns {Promise<Array>} A promise that resolves to an array of instantiated tools.
+ */
+const loadToolSuite = async ({ pluginKey, tools, user, options = {} }) => {
const authConfig = availableTools.find((tool) => tool.pluginKey === pluginKey).authConfig;
const suite = [];
const authValues = {};
+ const findAuthValue = async (authField) => {
+ const fields = authField.split('||');
+ for (const field of fields) {
+ let value = process.env[field];
+ if (value) {
+ return value;
+ }
+ try {
+ value = await getUserPluginAuthValue(user, field);
+ if (value) {
+ return value;
+ }
+ } catch (err) {
+ logger.error(`Error fetching plugin auth value for ${field}: ${err.message}`);
+ }
+ }
+ return null;
+ };
+
for (const auth of authConfig) {
- let authValue = process.env[auth.authField];
- if (!authValue) {
- authValue = await getUserPluginAuthValue(user, auth.authField);
+ const authValue = await findAuthValue(auth.authField);
+ if (authValue !== null) {
+ authValues[auth.authField] = authValue;
+ } else {
+ logger.warn(`[loadToolSuite] No auth value found for ${auth.authField}`);
}
- authValues[auth.authField] = authValue;
}
for (const tool of tools) {
diff --git a/api/cache/getLogStores.js b/api/cache/getLogStores.js
index 016c7700009..786bb1f1f74 100644
--- a/api/cache/getLogStores.js
+++ b/api/cache/getLogStores.js
@@ -1,5 +1,5 @@
const Keyv = require('keyv');
-const { CacheKeys } = require('librechat-data-provider');
+const { CacheKeys, ViolationTypes } = require('librechat-data-provider');
const { logFile, violationFile } = require('./keyvFiles');
const { math, isEnabled } = require('~/server/utils');
const keyvRedis = require('./keyvRedis');
@@ -23,6 +23,22 @@ const config = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.CONFIG_STORE });
+const tokenConfig = isEnabled(USE_REDIS) // ttl: 30 minutes
+ ? new Keyv({ store: keyvRedis, ttl: 1800000 })
+ : new Keyv({ namespace: CacheKeys.TOKEN_CONFIG, ttl: 1800000 });
+
+const genTitle = isEnabled(USE_REDIS) // ttl: 2 minutes
+ ? new Keyv({ store: keyvRedis, ttl: 120000 })
+ : new Keyv({ namespace: CacheKeys.GEN_TITLE, ttl: 120000 });
+
+const modelQueries = isEnabled(process.env.USE_REDIS)
+ ? new Keyv({ store: keyvRedis })
+ : new Keyv({ namespace: CacheKeys.MODEL_QUERIES });
+
+const abortKeys = isEnabled(USE_REDIS)
+ ? new Keyv({ store: keyvRedis })
+ : new Keyv({ namespace: CacheKeys.ABORT_KEYS, ttl: 600000 });
+
const namespaces = {
[CacheKeys.CONFIG_STORE]: config,
pending_req,
@@ -31,9 +47,17 @@ const namespaces = {
concurrent: createViolationInstance('concurrent'),
non_browser: createViolationInstance('non_browser'),
message_limit: createViolationInstance('message_limit'),
- token_balance: createViolationInstance('token_balance'),
+ token_balance: createViolationInstance(ViolationTypes.TOKEN_BALANCE),
registrations: createViolationInstance('registrations'),
+ [ViolationTypes.FILE_UPLOAD_LIMIT]: createViolationInstance(ViolationTypes.FILE_UPLOAD_LIMIT),
+ [ViolationTypes.ILLEGAL_MODEL_REQUEST]: createViolationInstance(
+ ViolationTypes.ILLEGAL_MODEL_REQUEST,
+ ),
logins: createViolationInstance('logins'),
+ [CacheKeys.ABORT_KEYS]: abortKeys,
+ [CacheKeys.TOKEN_CONFIG]: tokenConfig,
+ [CacheKeys.GEN_TITLE]: genTitle,
+ [CacheKeys.MODEL_QUERIES]: modelQueries,
};
/**
diff --git a/api/config/parsers.js b/api/config/parsers.js
index 59685eab0bf..16c85cba4f4 100644
--- a/api/config/parsers.js
+++ b/api/config/parsers.js
@@ -33,6 +33,10 @@ function getMatchingSensitivePatterns(valueStr) {
* @returns {string} - The redacted console message.
*/
function redactMessage(str) {
+ if (!str) {
+ return '';
+ }
+
const patterns = getMatchingSensitivePatterns(str);
if (patterns.length === 0) {
diff --git a/api/config/paths.js b/api/config/paths.js
index 41e3ac5054f..165e9e6cd4f 100644
--- a/api/config/paths.js
+++ b/api/config/paths.js
@@ -1,7 +1,14 @@
const path = require('path');
module.exports = {
+ root: path.resolve(__dirname, '..', '..'),
+ uploads: path.resolve(__dirname, '..', '..', 'uploads'),
+ clientPath: path.resolve(__dirname, '..', '..', 'client'),
dist: path.resolve(__dirname, '..', '..', 'client', 'dist'),
publicPath: path.resolve(__dirname, '..', '..', 'client', 'public'),
+ fonts: path.resolve(__dirname, '..', '..', 'client', 'public', 'fonts'),
+ assets: path.resolve(__dirname, '..', '..', 'client', 'public', 'assets'),
imageOutput: path.resolve(__dirname, '..', '..', 'client', 'public', 'images'),
+ structuredTools: path.resolve(__dirname, '..', 'app', 'clients', 'tools', 'structured'),
+ pluginManifest: path.resolve(__dirname, '..', 'app', 'clients', 'tools', 'manifest.json'),
};
diff --git a/api/config/winston.js b/api/config/winston.js
index 6cba153f163..81e972fbbc3 100644
--- a/api/config/winston.js
+++ b/api/config/winston.js
@@ -5,7 +5,15 @@ const { redactFormat, redactMessage, debugTraverse } = require('./parsers');
const logDir = path.join(__dirname, '..', 'logs');
-const { NODE_ENV, DEBUG_LOGGING = true, DEBUG_CONSOLE = false } = process.env;
+const { NODE_ENV, DEBUG_LOGGING = true, DEBUG_CONSOLE = false, CONSOLE_JSON = false } = process.env;
+
+const useConsoleJson =
+ (typeof CONSOLE_JSON === 'string' && CONSOLE_JSON?.toLowerCase() === 'true') ||
+ CONSOLE_JSON === true;
+
+const useDebugConsole =
+ (typeof DEBUG_CONSOLE === 'string' && DEBUG_CONSOLE?.toLowerCase() === 'true') ||
+ DEBUG_CONSOLE === true;
const levels = {
error: 0,
@@ -33,7 +41,7 @@ const level = () => {
const fileFormat = winston.format.combine(
redactFormat(),
- winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }),
+ winston.format.timestamp({ format: () => new Date().toISOString() }),
winston.format.errors({ stack: true }),
winston.format.splat(),
// redactErrors(),
@@ -99,14 +107,20 @@ const consoleFormat = winston.format.combine(
}),
);
-if (
- (typeof DEBUG_CONSOLE === 'string' && DEBUG_CONSOLE?.toLowerCase() === 'true') ||
- DEBUG_CONSOLE === true
-) {
+if (useDebugConsole) {
transports.push(
new winston.transports.Console({
level: 'debug',
- format: winston.format.combine(consoleFormat, debugTraverse),
+ format: useConsoleJson
+ ? winston.format.combine(fileFormat, debugTraverse, winston.format.json())
+ : winston.format.combine(fileFormat, debugTraverse),
+ }),
+ );
+} else if (useConsoleJson) {
+ transports.push(
+ new winston.transports.Console({
+ level: 'info',
+ format: winston.format.combine(fileFormat, winston.format.json()),
}),
);
} else {
diff --git a/api/models/Action.js b/api/models/Action.js
new file mode 100644
index 00000000000..5141569c103
--- /dev/null
+++ b/api/models/Action.js
@@ -0,0 +1,68 @@
+const mongoose = require('mongoose');
+const actionSchema = require('./schema/action');
+
+const Action = mongoose.model('action', actionSchema);
+
+/**
+ * Update an action with new data without overwriting existing properties,
+ * or create a new action if it doesn't exist.
+ *
+ * @param {Object} searchParams - The search parameters to find the action to update.
+ * @param {string} searchParams.action_id - The ID of the action to update.
+ * @param {string} searchParams.user - The user ID of the action's author.
+ * @param {Object} updateData - An object containing the properties to update.
+ * @returns {Promise