From 29a151c1a779b8fd55946e0fdedabebcd325db11 Mon Sep 17 00:00:00 2001
From: aminediro
Date: Fri, 28 Jun 2024 10:46:13 +0200
Subject: [PATCH 01/20] moved core repositories

---
 backend/core/quivr_core/api/__init__.py | 0
 backend/core/quivr_core/api/celery_config.py | 43 ++
 backend/core/quivr_core/api/celery_worker.py | 324 ++++
 backend/core/quivr_core/api/dependencies.py | 51 ++
 backend/core/quivr_core/api/logger.py | 45 ++
 backend/core/quivr_core/api/main.py | 22 +
 .../core/quivr_core/api/models/__init__.py | 0
 .../models/brains_subscription_invitations.py | 14 +
 .../api/models/databases/__init__.py | 0
 .../api/models/databases/llm_models.py | 13 +
 .../api/models/databases/repository.py | 85 +++
 .../api/models/databases/supabase/__init__.py | 8 +
 .../brains_subscription_invitations.py | 40 ++
 .../api/models/databases/supabase/files.py | 28 +
 .../api/models/databases/supabase/supabase.py | 21 +
 .../models/databases/supabase/user_usage.py | 128 ++++
 .../api/models/databases/supabase/vectors.py | 76 +++
 backend/core/quivr_core/api/models/files.py | 107 ++++
 .../quivr_core/api/models/files_in_storage.py | 16 +
 .../core/quivr_core/api/models/settings.py | 189 ++++++
 .../api/models/sqlalchemy_repository.py | 73 +++
 .../core/quivr_core/api/modules/__init__.py | 0
 .../quivr_core/api/modules/brain/__init__.py | 0
 .../api/modules/brain/api_brain_qa.py | 500 +++
 .../api/modules/brain/composite_brain_qa.py | 593 ++++++++++++++++++
 .../api/modules/brain/controller/__init__.py | 1 +
 .../modules/brain/controller/brain_routes.py | 210 +++++++
 .../api/modules/brain/dto/__init__.py | 0
 .../api/modules/brain/dto/inputs.py | 88 +++
 .../api/modules/brain/entity/__init__.py | 1 +
 .../entity/api_brain_definition_entity.py | 47 ++
 .../api/modules/brain/entity/brain_entity.py | 135 ++++
 .../composite_brain_connection_entity.py | 8 +
 .../modules/brain/entity/integration_brain.py | 46 ++
 .../modules/brain/integrations/Big/Brain.py | 146 +++++
 .../brain/integrations/Big/__init__.py | 0
 .../brain/integrations/Claude/Brain.py | 101 +++
 .../brain/integrations/Claude/__init__.py | 0
 .../modules/brain/integrations/GPT4/Brain.py | 283 +++++++
 .../brain/integrations/GPT4/__init__.py | 0
 .../integrations/Multi_Contract/Brain.py | 205 ++++++
 .../integrations/Multi_Contract/__init__.py | 0
 .../brain/integrations/Notion/Brain.py | 25 +
 .../integrations/Notion/Notion_connector.py | 393 ++++++++++
 .../brain/integrations/Notion/__init__.py | 0
 .../modules/brain/integrations/Proxy/Brain.py | 135 ++++
 .../brain/integrations/Proxy/__init__.py | 0
 .../modules/brain/integrations/SQL/Brain.py | 104 +++
 .../brain/integrations/SQL/SQL_connector.py | 41 ++
 .../brain/integrations/SQL/__init__.py | 0
 .../modules/brain/integrations/Self/Brain.py | 487 ++++++++++
 .../brain/integrations/Self/__init__.py | 0
 .../modules/brain/integrations/__init__.py | 0
 .../api/modules/brain/knowledge_brain_qa.py | 513 +++++++++
 .../api/modules/brain/qa_headless.py | 270 ++++
 .../api/modules/brain/qa_interface.py | 58 ++
 .../api/modules/brain/rags/__init__.py | 0
 .../api/modules/brain/rags/quivr_rag.py | 377 +++++
 .../api/modules/brain/rags/rag_interface.py | 31 +
 .../api/modules/brain/repository/__init__.py | 4 +
 .../brain/repository/api_brain_definitions.py | 60 ++
 .../api/modules/brain/repository/brains.py | 127 ++++
 .../modules/brain/repository/brains_users.py | 207 ++++++
 .../brain/repository/brains_vectors.py | 105 ++++
 .../composite_brains_connections.py | 63 ++
 .../brain/repository/external_api_secrets.py | 60 ++
 .../brain/repository/integration_brains.py | 148 +++++
 .../brain/repository/interfaces/__init__.py | 10 +
 .../api_brain_definitions_interface.py | 38 ++
 .../repository/interfaces/brains_interface.py | 61 ++
 .../interfaces/brains_users_interface.py | 95 +++
 .../interfaces/brains_vectors_interface.py | 41 ++
 .../composite_brains_connections_interface.py | 40 ++
 .../external_api_secrets_interface.py | 29 +
 .../integration_brains_interface.py | 63 ++
 .../api/modules/brain/service/__init__.py | 0
 .../service/api_brain_definition_service.py | 40 ++
 .../service/brain_authorization_service.py | 78 +++
 .../modules/brain/service/brain_service.py | 370 +++++++++++
 .../service/brain_subscription/__init__.py | 2 +
 .../resend_invitation_email.py | 57 ++
 .../subscription_invitation_service.py | 106 ++++
 .../brain/service/brain_user_service.py | 116 ++++
 .../brain/service/brain_vector_service.py | 63 ++
 .../modules/brain/service/call_brain_api.py | 118 ++++
 ...get_api_brain_definition_as_json_schema.py | 65 ++
 .../get_question_context_from_brain.py | 67 ++
 .../service/integration_brain_service.py | 31 +
 .../brain/service/test_brain_service.py | 0
 .../modules/brain/service/utils/__init__.py | 1 +
 .../service/utils/format_chat_history.py | 31 +
 .../service/utils/get_prompt_to_use_id.py | 21 +
 .../brain/service/utils/validate_brain.py | 14 +
 .../brain/tests/test_brains_interface.py | 0
 .../quivr_core/api/modules/chat/__init__.py | 6 +
 .../api/modules/chat/controller/__init__.py | 1 +
 .../modules/chat/controller/chat/__init_.py | 0
 .../chat/controller/chat/brainful_chat.py | 113 ++++
 .../chat/controller/chat/brainless_chat.py | 27 +
 .../modules/chat/controller/chat/factory.py | 11 +
 .../modules/chat/controller/chat/interface.py | 17 +
 .../chat/controller/chat/test_utils.py | 0
 .../api/modules/chat/controller/chat/utils.py | 110 ++++
 .../modules/chat/controller/chat_routes.py | 300 +++++++++
 .../api/modules/chat/dto/__init__.py | 0
 .../quivr_core/api/modules/chat/dto/chats.py | 47 ++
 .../quivr_core/api/modules/chat/dto/inputs.py | 46 ++
 .../api/modules/chat/dto/outputs.py | 54 ++
 .../api/modules/chat/entity/__init__.py | 0
 .../api/modules/chat/entity/chat.py | 81 +++
 .../api/modules/chat/repository/__init__.py | 0
 .../api/modules/chat/repository/chats.py | 127 ++++
 .../chat/repository/chats_interface.py | 93 +++
 .../api/modules/chat/service/__init__.py | 0
 .../api/modules/chat/service/chat_service.py | 209 ++++++
 .../api/modules/chat/service/utils.py | 44 ++
 .../api/modules/chat/tests/conftest.py | 0
 .../api/modules/chat/tests/test_chats.py | 177 ++++++
 .../quivr_core/api/modules/dependencies.py | 71 +++
 .../api/modules/knowledge/__init__.py | 0
 .../modules/knowledge/controller/__init__.py | 1 +
 .../knowledge/controller/knowledge_routes.py | 102 +++
 .../api/modules/knowledge/dto/__init__.py | 2 +
 .../api/modules/knowledge/dto/inputs.py | 18 +
 .../api/modules/knowledge/dto/outputs.py | 8 +
 .../api/modules/knowledge/entity/__init__.py | 1 +
 .../api/modules/knowledge/entity/knowledge.py | 12 +
 .../modules/knowledge/repository/__init__.py | 1 +
 .../repository/knowledge_interface.py | 58 ++
 .../knowledge/repository/knowledges.py | 114 ++++
 .../modules/knowledge/repository/storage.py | 30 +
 .../knowledge/repository/storage_interface.py | 10 +
 .../api/modules/knowledge/service/__init__.py | 0
 .../knowledge/service/knowledge_service.py | 45 ++
 .../quivr_core/api/modules/prompt/__init__.py | 0
 .../api/modules/prompt/controller/__init__.py | 1 +
 .../prompt/controller/prompt_routes.py | 56 ++
 .../api/modules/prompt/entity/__init__.py | 7 +
 .../api/modules/prompt/entity/prompt.py | 53 ++
 .../api/modules/prompt/repository/__init__.py | 0
 .../api/modules/prompt/repository/prompts.py | 103 +++
 .../prompt/repository/prompts_interface.py | 57 ++
 .../api/modules/prompt/service/__init__.py | 1 +
 .../prompt/service/get_prompt_to_use.py | 17 +
 .../modules/prompt/service/prompt_service.py | 59 ++
 .../api/modules/prompt/tests/test_prompt.py | 0
 .../quivr_core/api/modules/upload/__init__.py | 0
 .../api/modules/upload/controller/__init__.py | 1 +
 .../upload/controller/upload_routes.py | 120 ++++
 .../api/modules/upload/service/__init__.py | 0
 .../service/generate_file_signed_url.py | 27 +
 .../api/modules/upload/service/list_files.py | 17 +
 .../api/modules/upload/service/upload_file.py | 135 ++++
 .../modules/upload/tests/test_files/test.bib | 6 +
 .../modules/upload/tests/test_files/test.csv | 17 +
 .../modules/upload/tests/test_files/test.pdf | Bin 0 -> 10683 bytes
 .../modules/upload/tests/test_files/test.txt | 1 +
 .../api/modules/upload/tests/test_upload.py | 0
 .../quivr_core/api/modules/user/__init__.py | 0
 .../api/modules/user/controller/__init__.py | 1 +
 .../user/controller/user_controller.py | 116 ++++
 .../quivr_core/api/modules/user/dto/inputs.py | 12 +
 .../api/modules/user/entity/user_identity.py | 28 +
 .../api/modules/user/repository/__init__.py | 1 +
 .../api/modules/user/repository/users.py | 127 ++++
 .../user/repository/users_interface.py | 63 ++
 .../api/modules/user/service/__init__.py | 1 +
 .../api/modules/user/service/user_service.py | 17 +
 .../api/modules/user/service/user_usage.py | 99 +++
 .../user/tests/test_user_controller.py | 0
 .../core/quivr_core/api/packages/__init__.py | 0
 .../api/packages/emails/__init__.py | 0
 .../api/packages/emails/send_email.py | 11 +
 .../api/packages/embeddings/__init__.py | 0
 .../api/packages/embeddings/vectors.py | 73 +++
 .../quivr_core/api/packages/files/__init__.py | 0
 .../api/packages/files/crawl/__init__.py | 0
 .../api/packages/files/crawl/crawler.py | 44 ++
 .../quivr_core/api/packages/files/file.py | 45 ++
 .../api/packages/files/loaders/__init__.py | 0
 .../api/packages/files/loaders/telegram.py | 65 ++
 .../api/packages/files/parsers/__init__.py | 0
 .../api/packages/files/parsers/audio.py | 52 ++
 .../api/packages/files/parsers/bibtex.py | 18 +
 .../api/packages/files/parsers/code_python.py | 18 +
 .../api/packages/files/parsers/common.py | 118 ++++
 .../api/packages/files/parsers/csv.py | 18 +
 .../api/packages/files/parsers/docx.py | 18 +
 .../api/packages/files/parsers/epub.py | 18 +
 .../api/packages/files/parsers/github.py | 79 +++
 .../api/packages/files/parsers/html.py | 18 +
 .../api/packages/files/parsers/markdown.py | 18 +
 .../api/packages/files/parsers/notebook.py | 18 +
 .../api/packages/files/parsers/odt.py | 18 +
 .../api/packages/files/parsers/pdf.py | 22 +
 .../api/packages/files/parsers/powerpoint.py | 18 +
 .../api/packages/files/parsers/telegram.py | 17 +
 .../api/packages/files/parsers/txt.py | 18 +
 .../api/packages/files/parsers/xlsx.py | 18 +
 .../api/packages/files/processors.py | 108 ++++
 .../api/packages/quivr_core/__init__.py | 0
 .../api/packages/quivr_core/config.py | 23 +
 .../api/packages/quivr_core/models.py | 58 ++
 .../api/packages/quivr_core/prompts.py | 58 ++
 .../api/packages/quivr_core/quivr_rag.py | 232 +++++++
 .../api/packages/quivr_core/rag_factory.py | 32 +
 .../api/packages/quivr_core/rag_service.py | 296 +++++++++
 .../api/packages/quivr_core/utils.py | 272 ++++++++
 .../quivr_core/api/packages/utils/__init__.py | 2 +
 .../utils/handle_request_validation_error.py | 24 +
 .../api/packages/utils/parse_message_time.py | 5 +
 .../api/packages/utils/telemetry.py | 60 ++
 backend/core/quivr_core/api/tests/conftest.py | 61 ++
 213 files changed, 13464 insertions(+)
 create mode 100644 backend/core/quivr_core/api/__init__.py
 create mode 100644 backend/core/quivr_core/api/celery_config.py
 create mode 100644 backend/core/quivr_core/api/celery_worker.py
 create mode 100644 backend/core/quivr_core/api/dependencies.py
 create mode 100644 backend/core/quivr_core/api/logger.py
 create mode 100644 backend/core/quivr_core/api/main.py
 create mode 100644 backend/core/quivr_core/api/models/__init__.py
 create mode 100644 backend/core/quivr_core/api/models/brains_subscription_invitations.py
 create mode 100644 backend/core/quivr_core/api/models/databases/__init__.py
 create mode 100644 backend/core/quivr_core/api/models/databases/llm_models.py
 create mode 100644 backend/core/quivr_core/api/models/databases/repository.py
 create mode 100644 backend/core/quivr_core/api/models/databases/supabase/__init__.py
 create mode 100644 backend/core/quivr_core/api/models/databases/supabase/brains_subscription_invitations.py
 create mode 100644 backend/core/quivr_core/api/models/databases/supabase/files.py
 create mode 100644 backend/core/quivr_core/api/models/databases/supabase/supabase.py
 create mode 100644 backend/core/quivr_core/api/models/databases/supabase/user_usage.py
 create mode 100644 backend/core/quivr_core/api/models/databases/supabase/vectors.py
 create mode 100644 backend/core/quivr_core/api/models/files.py
 create mode 100644 backend/core/quivr_core/api/models/files_in_storage.py
 create mode 100644 backend/core/quivr_core/api/models/settings.py
 create mode 100644 backend/core/quivr_core/api/models/sqlalchemy_repository.py
 create mode 100644 backend/core/quivr_core/api/modules/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/api_brain_qa.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/composite_brain_qa.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/controller/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/controller/brain_routes.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/dto/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/dto/inputs.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/entity/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/entity/api_brain_definition_entity.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/entity/brain_entity.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/entity/composite_brain_connection_entity.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/entity/integration_brain.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Big/Brain.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Big/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Claude/Brain.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Claude/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/GPT4/Brain.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/GPT4/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Multi_Contract/Brain.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Multi_Contract/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Notion/Brain.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Notion/Notion_connector.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Notion/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Proxy/Brain.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Proxy/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/SQL/Brain.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/SQL/SQL_connector.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/SQL/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Self/Brain.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Self/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/integrations/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/knowledge_brain_qa.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/qa_headless.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/qa_interface.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/rags/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/rags/quivr_rag.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/rags/rag_interface.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/repository/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/repository/api_brain_definitions.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/repository/brains.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/repository/brains_users.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/repository/brains_vectors.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/repository/composite_brains_connections.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/repository/external_api_secrets.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/repository/integration_brains.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/repository/interfaces/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/repository/interfaces/api_brain_definitions_interface.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_interface.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_users_interface.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_vectors_interface.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/repository/interfaces/composite_brains_connections_interface.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/repository/interfaces/external_api_secrets_interface.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/repository/interfaces/integration_brains_interface.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/service/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/service/api_brain_definition_service.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/service/brain_authorization_service.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/service/brain_service.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/service/brain_subscription/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/service/brain_subscription/resend_invitation_email.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/service/brain_subscription/subscription_invitation_service.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/service/brain_user_service.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/service/brain_vector_service.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/service/call_brain_api.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/service/get_api_brain_definition_as_json_schema.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/service/get_question_context_from_brain.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/service/integration_brain_service.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/service/test_brain_service.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/service/utils/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/service/utils/format_chat_history.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/service/utils/get_prompt_to_use_id.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/service/utils/validate_brain.py
 create mode 100644 backend/core/quivr_core/api/modules/brain/tests/test_brains_interface.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/controller/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/controller/chat/__init_.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/controller/chat/brainful_chat.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/controller/chat/brainless_chat.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/controller/chat/factory.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/controller/chat/interface.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/controller/chat/test_utils.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/controller/chat/utils.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/controller/chat_routes.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/dto/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/dto/chats.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/dto/inputs.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/dto/outputs.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/entity/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/entity/chat.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/repository/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/repository/chats.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/repository/chats_interface.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/service/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/service/chat_service.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/service/utils.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/tests/conftest.py
 create mode 100644 backend/core/quivr_core/api/modules/chat/tests/test_chats.py
 create mode 100644 backend/core/quivr_core/api/modules/dependencies.py
 create mode 100644 backend/core/quivr_core/api/modules/knowledge/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/knowledge/controller/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/knowledge/controller/knowledge_routes.py
 create mode 100644 backend/core/quivr_core/api/modules/knowledge/dto/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/knowledge/dto/inputs.py
 create mode 100644 backend/core/quivr_core/api/modules/knowledge/dto/outputs.py
 create mode 100644 backend/core/quivr_core/api/modules/knowledge/entity/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/knowledge/entity/knowledge.py
 create mode 100644 backend/core/quivr_core/api/modules/knowledge/repository/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/knowledge/repository/knowledge_interface.py
 create mode 100644 backend/core/quivr_core/api/modules/knowledge/repository/knowledges.py
 create mode 100644 backend/core/quivr_core/api/modules/knowledge/repository/storage.py
 create mode 100644 backend/core/quivr_core/api/modules/knowledge/repository/storage_interface.py
 create mode 100644 backend/core/quivr_core/api/modules/knowledge/service/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/knowledge/service/knowledge_service.py
 create mode 100644 backend/core/quivr_core/api/modules/prompt/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/prompt/controller/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/prompt/controller/prompt_routes.py
 create mode 100644 backend/core/quivr_core/api/modules/prompt/entity/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/prompt/entity/prompt.py
 create mode 100644 backend/core/quivr_core/api/modules/prompt/repository/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/prompt/repository/prompts.py
 create mode 100644 backend/core/quivr_core/api/modules/prompt/repository/prompts_interface.py
 create mode 100644 backend/core/quivr_core/api/modules/prompt/service/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/prompt/service/get_prompt_to_use.py
 create mode 100644 backend/core/quivr_core/api/modules/prompt/service/prompt_service.py
 create mode 100644 backend/core/quivr_core/api/modules/prompt/tests/test_prompt.py
 create mode 100644 backend/core/quivr_core/api/modules/upload/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/upload/controller/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/upload/controller/upload_routes.py
 create mode 100644 backend/core/quivr_core/api/modules/upload/service/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/upload/service/generate_file_signed_url.py
 create mode 100644 backend/core/quivr_core/api/modules/upload/service/list_files.py
 create mode 100644 backend/core/quivr_core/api/modules/upload/service/upload_file.py
 create mode 100644 backend/core/quivr_core/api/modules/upload/tests/test_files/test.bib
 create mode 100644 backend/core/quivr_core/api/modules/upload/tests/test_files/test.csv
 create mode 100644 backend/core/quivr_core/api/modules/upload/tests/test_files/test.pdf
 create mode 100644 backend/core/quivr_core/api/modules/upload/tests/test_files/test.txt
 create mode 100644 backend/core/quivr_core/api/modules/upload/tests/test_upload.py
 create mode 100644 backend/core/quivr_core/api/modules/user/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/user/controller/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/user/controller/user_controller.py
 create mode 100644 backend/core/quivr_core/api/modules/user/dto/inputs.py
 create mode 100644 backend/core/quivr_core/api/modules/user/entity/user_identity.py
 create mode 100644 backend/core/quivr_core/api/modules/user/repository/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/user/repository/users.py
 create mode 100644 backend/core/quivr_core/api/modules/user/repository/users_interface.py
 create mode 100644 backend/core/quivr_core/api/modules/user/service/__init__.py
 create mode 100644 backend/core/quivr_core/api/modules/user/service/user_service.py
 create mode 100644 backend/core/quivr_core/api/modules/user/service/user_usage.py
 create mode 100644 backend/core/quivr_core/api/modules/user/tests/test_user_controller.py
 create mode 100644 backend/core/quivr_core/api/packages/__init__.py
 create mode 100644 backend/core/quivr_core/api/packages/emails/__init__.py
 create mode 100644 backend/core/quivr_core/api/packages/emails/send_email.py
 create mode 100644 backend/core/quivr_core/api/packages/embeddings/__init__.py
 create mode 100644 backend/core/quivr_core/api/packages/embeddings/vectors.py
 create mode 100644 backend/core/quivr_core/api/packages/files/__init__.py
 create mode 100644 backend/core/quivr_core/api/packages/files/crawl/__init__.py
 create mode 100644 backend/core/quivr_core/api/packages/files/crawl/crawler.py
 create mode 100644 backend/core/quivr_core/api/packages/files/file.py
 create mode 100644 backend/core/quivr_core/api/packages/files/loaders/__init__.py
 create mode 100644 backend/core/quivr_core/api/packages/files/loaders/telegram.py
 create mode 100644 backend/core/quivr_core/api/packages/files/parsers/__init__.py
 create mode 100644 backend/core/quivr_core/api/packages/files/parsers/audio.py
 create mode 100644 backend/core/quivr_core/api/packages/files/parsers/bibtex.py
 create mode 100644 backend/core/quivr_core/api/packages/files/parsers/code_python.py
 create mode 100644 backend/core/quivr_core/api/packages/files/parsers/common.py
 create mode 100644 backend/core/quivr_core/api/packages/files/parsers/csv.py
 create mode 100644 backend/core/quivr_core/api/packages/files/parsers/docx.py
 create mode 100644 backend/core/quivr_core/api/packages/files/parsers/epub.py
 create mode 100644 backend/core/quivr_core/api/packages/files/parsers/github.py
 create mode 100644 backend/core/quivr_core/api/packages/files/parsers/html.py
 create mode 100644 backend/core/quivr_core/api/packages/files/parsers/markdown.py
 create mode 100644 backend/core/quivr_core/api/packages/files/parsers/notebook.py
 create mode 100644 backend/core/quivr_core/api/packages/files/parsers/odt.py
 create mode 100644 backend/core/quivr_core/api/packages/files/parsers/pdf.py
 create mode 100644 backend/core/quivr_core/api/packages/files/parsers/powerpoint.py
 create mode 100644 backend/core/quivr_core/api/packages/files/parsers/telegram.py
 create mode 100644 backend/core/quivr_core/api/packages/files/parsers/txt.py
 create mode 100644 backend/core/quivr_core/api/packages/files/parsers/xlsx.py
 create mode 100644 backend/core/quivr_core/api/packages/files/processors.py
 create mode 100644 backend/core/quivr_core/api/packages/quivr_core/__init__.py
 create mode 100644 backend/core/quivr_core/api/packages/quivr_core/config.py
 create mode 100644 backend/core/quivr_core/api/packages/quivr_core/models.py
 create mode 100644 backend/core/quivr_core/api/packages/quivr_core/prompts.py
 create mode 100644 backend/core/quivr_core/api/packages/quivr_core/quivr_rag.py
 create mode 100644 backend/core/quivr_core/api/packages/quivr_core/rag_factory.py
 create mode 100644 backend/core/quivr_core/api/packages/quivr_core/rag_service.py
 create mode 100644 backend/core/quivr_core/api/packages/quivr_core/utils.py
 create mode 100644 backend/core/quivr_core/api/packages/utils/__init__.py
 create mode 100644 backend/core/quivr_core/api/packages/utils/handle_request_validation_error.py
 create mode 100644 backend/core/quivr_core/api/packages/utils/parse_message_time.py
 create mode 100644 backend/core/quivr_core/api/packages/utils/telemetry.py
 create mode 100644 backend/core/quivr_core/api/tests/conftest.py

diff --git a/backend/core/quivr_core/api/__init__.py b/backend/core/quivr_core/api/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/backend/core/quivr_core/api/celery_config.py b/backend/core/quivr_core/api/celery_config.py
new file mode 100644
index 000000000000..9ee82370361c
--- /dev/null
+++ b/backend/core/quivr_core/api/celery_config.py
@@ -0,0 +1,43 @@
+# celery_config.py
+import os
+
+import dotenv
+from celery import Celery
+
+dotenv.load_dotenv()
+
+CELERY_BROKER_URL = os.getenv("CELERY_BROKER_URL", "")
+CELERY_BROKER_QUEUE_NAME = os.getenv("CELERY_BROKER_QUEUE_NAME", "quivr")
+
+celery = Celery(__name__)
+
+if CELERY_BROKER_URL.startswith("sqs"):
+    broker_transport_options = {
+        "predefined_queues": {
+            CELERY_BROKER_QUEUE_NAME: {
+                "url": CELERY_BROKER_URL,
+            }
+        }
+    }
+    celery = Celery(
+        __name__,
+        broker=CELERY_BROKER_URL,
+        task_serializer="json",
+        task_concurrency=4,
+        worker_prefetch_multiplier=1,
+        broker_transport_options=broker_transport_options,
+    )
+    celery.conf.task_default_queue = CELERY_BROKER_QUEUE_NAME
+elif CELERY_BROKER_URL.startswith("redis"):
+    celery = Celery(
+        __name__,
+        broker=f"{CELERY_BROKER_URL}",
+        backend=f"{CELERY_BROKER_URL}",
+        task_concurrency=4,
+        worker_prefetch_multiplier=2,
+        task_serializer="json",
+    )
+else:
+    raise ValueError(f"Unsupported broker URL: {CELERY_BROKER_URL}")
+
+celery.autodiscover_tasks(["quivr_api.modules.sync.tasks"])
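
A note before the worker module: tasks registered on this app are addressed by their registered name, so a producer only needs the Celery app, not the task code. A minimal sketch, assuming the module resolves as quivr_core.celery_config (the import path the worker below uses) and a hypothetical storage key:

    from quivr_core.celery_config import celery

    # Enqueue by registered task name; the worker defined below consumes it
    # from the default queue (CELERY_BROKER_QUEUE_NAME, "quivr" by default).
    celery.send_task(
        "process_file_and_notify",
        kwargs={
            "file_name": "user-id/report.pdf",  # hypothetical storage key
            "file_original_name": "report.pdf",
            "brain_id": "11111111-1111-1111-1111-111111111111",
        },
    )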
diff --git a/backend/core/quivr_core/api/celery_worker.py b/backend/core/quivr_core/api/celery_worker.py
new file mode 100644
index 000000000000..318fbc545ed8
--- /dev/null
+++ b/backend/core/quivr_core/api/celery_worker.py
@@ -0,0 +1,324 @@
+import os
+from datetime import datetime, timedelta
+from tempfile import NamedTemporaryFile
+from uuid import UUID
+
+from celery.schedules import crontab
+from pytz import timezone
+
+from quivr_core.celery_config import celery
+from quivr_core.logger import get_logger
+from quivr_core.middlewares.auth.auth_bearer import AuthBearer
+from quivr_core.models.files import File
+from quivr_core.models.settings import get_supabase_client, get_supabase_db
+from quivr_core.modules.brain.integrations.Notion.Notion_connector import (
+    NotionConnector,
+)
+from quivr_core.modules.brain.service.brain_service import BrainService
+from quivr_core.modules.brain.service.brain_vector_service import BrainVectorService
+from quivr_core.modules.notification.dto.inputs import NotificationUpdatableProperties
+from quivr_core.modules.notification.entity.notification import NotificationsStatusEnum
+from quivr_core.modules.notification.service.notification_service import (
+    NotificationService,
+)
+from quivr_core.modules.onboarding.service.onboarding_service import OnboardingService
+from quivr_core.packages.files.crawl.crawler import CrawlWebsite, slugify
+from quivr_core.packages.files.parsers.github import process_github
+from quivr_core.packages.files.processors import filter_file
+from quivr_core.packages.utils.telemetry import maybe_send_telemetry
+
+logger = get_logger(__name__)
+
+onboardingService = OnboardingService()
+notification_service = NotificationService()
+brain_service = BrainService()
+auth_bearer = AuthBearer()
+
+
+@celery.task(name="process_file_and_notify")
+def process_file_and_notify(
+    file_name: str,
+    file_original_name: str,
+    brain_id,
+    notification_id=None,
+    integration=None,
+    delete_file=False,
+):
+    try:
+        supabase_client = get_supabase_client()
+        tmp_name = file_name.replace("/", "_")
+        base_file_name = os.path.basename(file_name)
+        _, file_extension = os.path.splitext(base_file_name)
+
+        with NamedTemporaryFile(
+            suffix="_" + tmp_name,  # pyright: ignore reportPrivateUsage=none
+        ) as tmp_file:
+            res = supabase_client.storage.from_("quivr").download(file_name)
+            tmp_file.write(res)
+            tmp_file.flush()
+            file_instance = File(
+                file_name=base_file_name,
+                tmp_file_path=tmp_file.name,
+                bytes_content=res,
+                file_size=len(res),
+                file_extension=file_extension,
+            )
+            brain_vector_service = BrainVectorService(brain_id)
+            if delete_file:  # TODO fix bug
+                brain_vector_service.delete_file_from_brain(
+                    file_original_name, only_vectors=True
+                )
+
+            filter_file(
+                file=file_instance,
+                brain_id=brain_id,
+                original_file_name=file_original_name,
+            )
+
+            if notification_id:
+                notification_service.update_notification_by_id(
+                    notification_id,
+                    NotificationUpdatableProperties(
+                        status=NotificationsStatusEnum.SUCCESS,
+                        description="Your file has been properly uploaded!",
+                    ),
+                )
+            brain_service.update_brain_last_update_time(brain_id)
+
+            return True
+
+    except TimeoutError:
+        logger.error("TimeoutError")
+
+    except Exception as e:
+        logger.exception(e)
+        notification_service.update_notification_by_id(
+            notification_id,
+            NotificationUpdatableProperties(
+                status=NotificationsStatusEnum.ERROR,
+                description=f"An error occurred while processing the file: {e}",
+            ),
+        )
+
+
+@celery.task(name="process_crawl_and_notify")
+def process_crawl_and_notify(
+    crawl_website_url: str,
+    brain_id: UUID,
+    notification_id=None,
+):
+    crawl_website = CrawlWebsite(url=crawl_website_url)
+
+    if not crawl_website.checkGithub():
+        # Build file data
+        extracted_content = crawl_website.process()
+        extracted_content_bytes = extracted_content.encode("utf-8")
+        file_name = slugify(crawl_website.url) + ".txt"
+
+        with NamedTemporaryFile(
+            suffix="_" + file_name,  # pyright: ignore reportPrivateUsage=none
+        ) as tmp_file:
+            tmp_file.write(extracted_content_bytes)
+            tmp_file.flush()
+            file_instance = File(
+                file_name=file_name,
+                tmp_file_path=tmp_file.name,
+                bytes_content=extracted_content_bytes,
+                file_size=len(extracted_content),
+                file_extension=".txt",
+            )
+            filter_file(
+                file=file_instance,
+                brain_id=brain_id,
+                original_file_name=crawl_website_url,
+            )
+            notification_service.update_notification_by_id(
+                notification_id,
+                NotificationUpdatableProperties(
+                    status=NotificationsStatusEnum.SUCCESS,
+                    description="Your URL has been properly crawled!",
+                ),
+            )
+    else:
+        process_github(
+            repo=crawl_website.url,
+            brain_id=brain_id,
+        )
+
+        if notification_id:
+            notification_service.update_notification_by_id(
+                notification_id,
+                NotificationUpdatableProperties(
+                    status=NotificationsStatusEnum.SUCCESS,
+                    description="Your file has been properly uploaded!",
+                ),
+            )
+
+    brain_service.update_brain_last_update_time(brain_id)
+    return True
+
+
+@celery.task
+def remove_onboarding_more_than_x_days_task():
+    onboardingService.remove_onboarding_more_than_x_days(7)
+
+
+@celery.task(name="NotionConnectorLoad")
+def process_integration_brain_created_initial_load(brain_id, user_id):
+    notion_connector = NotionConnector(brain_id=brain_id, user_id=user_id)
+
+    pages = notion_connector.load()
+
+    print("pages: ", len(pages))
+
+
+@celery.task
+def process_integration_brain_sync_user_brain(brain_id, user_id):
+    notion_connector = NotionConnector(brain_id=brain_id, user_id=user_id)
+
+    notion_connector.poll()
+
+
+@celery.task
+def ping_telemetry():
+    maybe_send_telemetry("ping", {"ping": "pong"})
+
+
+@celery.task(name="check_if_is_premium_user")
+def check_if_is_premium_user():
+    supabase = get_supabase_db()
+    supabase_db = supabase.db
+
+    paris_tz = timezone("Europe/Paris")
+    current_time = datetime.now(paris_tz)
+    current_time_str = current_time.strftime("%Y-%m-%d %H:%M:%S.%f")
+    logger.debug(f"Current time: {current_time_str}")
+
+    # Define the memoization period (e.g., 1 hour)
+    memoization_period = timedelta(hours=1)
+    memoization_cutoff = current_time - memoization_period
+
+    # Fetch all necessary data in bulk
+    subscriptions = (
+        supabase_db.table("subscriptions")
+        .select("*")
+        .filter("current_period_end", "gt", current_time_str)
+        .execute()
+    ).data
+
+    customers = (supabase_db.table("customers").select("*").execute()).data
+
+    customer_emails = [customer["email"] for customer in customers]
+
+    # Split customer emails into batches of 20
+    email_batches = [
+        customer_emails[i : i + 20] for i in range(0, len(customer_emails), 20)
+    ]
+
+    users = []
+    for email_batch in email_batches:
+        batch_users = (
+            supabase_db.table("users")
+            .select("id, email")
+            .in_("email", email_batch)
+            .execute()
+        ).data
+        users.extend(batch_users)
+
+    product_features = (
+        supabase_db.table("product_to_features").select("*").execute()
+    ).data
+
+    user_settings = (supabase_db.table("user_settings").select("*").execute()).data
+
+    # Create lookup dictionaries for faster access
+    user_dict = {user["email"]: user["id"] for user in users}
+    customer_dict = {customer["id"]: customer for customer in customers}
+    product_dict = {
+        product["stripe_product_id"]: product for product in product_features
+    }
+    settings_dict = {setting["user_id"]: setting for setting in user_settings}
+
+    # Process subscriptions and update user settings
+    premium_user_ids = set()
+    settings_to_upsert = {}
+    for sub in subscriptions:
+        if sub["attrs"]["status"] != "active":
+            continue
+
+        customer = customer_dict.get(sub["customer"])
+        if not customer:
+            continue
+
+        user_id = user_dict.get(customer["email"])
+        if not user_id:
+            continue
+
+        current_settings = settings_dict.get(user_id, {})
+        last_check = current_settings.get("last_stripe_check")
+
+        # Skip if the user was checked recently
+        if last_check and datetime.fromisoformat(last_check) > memoization_cutoff:
+            premium_user_ids.add(user_id)
+            continue
+
+        user_id = str(user_id)  # Ensure user_id is a string
+        premium_user_ids.add(user_id)
+
+        product_id = sub["attrs"]["items"]["data"][0]["plan"]["product"]
+        product = product_dict.get(product_id)
+        if not product:
+            logger.warning(f"No matching product found for subscription: {sub['id']}")
+            continue
+
+        settings_to_upsert[user_id] = {
+            "user_id": user_id,
+            "max_brains": product["max_brains"],
+            "max_brain_size": product["max_brain_size"],
+            "monthly_chat_credit": product["monthly_chat_credit"],
+            "api_access": product["api_access"],
+            "models": product["models"],
+            "is_premium": True,
+            "last_stripe_check": current_time_str,
+        }
+
+    # Bulk upsert premium user settings in batches of 10
+    settings_list = list(settings_to_upsert.values())
+    for i in range(0, len(settings_list), 10):
+        batch = settings_list[i : i + 10]
+        supabase_db.table("user_settings").upsert(batch).execute()
+
+    # Delete settings for non-premium users in batches of 10
+    settings_to_delete = [
+        setting["user_id"]
+        for setting in user_settings
+        if setting["user_id"] not in premium_user_ids and setting.get("is_premium")
+    ]
+    for i in range(0, len(settings_to_delete), 10):
+        batch = settings_to_delete[i : i + 10]
+        supabase_db.table("user_settings").delete().in_("user_id", batch).execute()
+
+    logger.info(
+        f"Updated {len(settings_to_upsert)} premium users, deleted settings for {len(settings_to_delete)} non-premium users"
+    )
+    return True
+
+
+celery.conf.beat_schedule = {
+    "remove_onboarding_more_than_x_days_task": {
+        "task": f"{__name__}.remove_onboarding_more_than_x_days_task",
+        "schedule": crontab(minute="0", hour="0"),
+    },
+    "ping_telemetry": {
+        "task": f"{__name__}.ping_telemetry",
+        "schedule": crontab(minute="*/30", hour="*"),
+    },
+    "process_sync_active": {
+        "task": "process_sync_active",
+        "schedule": crontab(minute="*/1", hour="*"),
+    },
+    "process_premium_users": {
+        "task": "check_if_is_premium_user",
+        "schedule": crontab(minute="*/1", hour="*"),
+    },
+}
diff --git a/backend/core/quivr_core/api/dependencies.py b/backend/core/quivr_core/api/dependencies.py
new file mode 100644
index 000000000000..d66a773365fc
--- /dev/null
+++ b/backend/core/quivr_core/api/dependencies.py
@@ -0,0 +1,51 @@
+import os
+from typing import AsyncGenerator, Callable, Type, TypeVar
+
+from fastapi import Depends
+from sqlalchemy.ext.asyncio import create_async_engine
+from sqlmodel.ext.asyncio.session import AsyncSession
+
+from quivr_core.api.repositories.base_repository import BaseRepository
+from quivr_core.api.services.base_service import BaseService
+from quivr_core.models.settings import settings
+from quivr_core.storage.local_storage import LocalStorage
+from quivr_core.storage.storage_base import StorageBase
+
+R = TypeVar("R", bound=BaseRepository)
+S = TypeVar("S", bound=BaseService)
+
+async_engine = create_async_engine(
+    settings.pg_database_async_url,
+    echo=True if os.getenv("ORM_DEBUG") else False,
+    future=True,
+)
+
+# TODO: get env variable and set it
+storage = LocalStorage()
+
+
+def get_storage() -> StorageBase:
+    return storage
+
+
+async def get_async_session() -> AsyncGenerator[AsyncSession, None]:
+    async with AsyncSession(async_engine) as session:
+        yield session
+
+
+def get_repository(repository_model: Type[R]) -> Callable[..., R]:
+    def _get_repository(session: AsyncSession = Depends(get_async_session)) -> R:
+        return repository_model(session)
+
+    return _get_repository
+
+
+def get_service(service: Type[S]) -> Callable[..., S]:
+    def _get_service(
+        repository: BaseRepository = Depends(
+            get_repository(service.get_repository_cls())
+        ),
+    ) -> S:
+        return service(repository)
+
+    return _get_service
diff --git a/backend/core/quivr_core/api/logger.py b/backend/core/quivr_core/api/logger.py
new file mode 100644
index 000000000000..b839e9aef4f1
--- /dev/null
+++ b/backend/core/quivr_core/api/logger.py
@@ -0,0 +1,45 @@
+import logging
+import os
+from logging.handlers import RotatingFileHandler
+
+from colorlog import (
+    ColoredFormatter,
+)
+
+
+def get_logger(logger_name, log_file="application.log"):
+    log_level = os.getenv("LOG_LEVEL", "WARNING").upper()
+    logger = logging.getLogger(logger_name)
+    logger.setLevel(log_level)
+    logger.propagate = False  # Prevent log propagation to avoid double logging
+
+    formatter = logging.Formatter(
+        "[%(levelname)s] %(name)s [%(filename)s:%(lineno)d]: %(message)s"
+    )
+
+    color_formatter = ColoredFormatter(
+        "%(log_color)s[%(levelname)s]%(reset)s %(name)s [%(filename)s:%(lineno)d]: %(message)s",
+        log_colors={
+            "DEBUG": "cyan",
+            "INFO": "green",
+            "WARNING": "yellow",
+            "ERROR": "red",
+            "CRITICAL": "red,bg_white",
+        },
+        reset=True,
+        style="%",
+    )
+
+    console_handler = logging.StreamHandler()
+    console_handler.setFormatter(color_formatter)
+
+    file_handler = RotatingFileHandler(
+        log_file, maxBytes=5000000, backupCount=5
+    )  # 5MB file
+    file_handler.setFormatter(formatter)
+
+    if not logger.handlers:
+        logger.addHandler(console_handler)
+        logger.addHandler(file_handler)
+
+    return logger
diff --git a/backend/core/quivr_core/api/main.py b/backend/core/quivr_core/api/main.py
new file mode 100644
index 000000000000..a75da91d0d1f
--- /dev/null
+++ b/backend/core/quivr_core/api/main.py
@@ -0,0 +1,22 @@
+from dotenv import load_dotenv
+from fastapi import FastAPI
+
+from quivr_core.api.modules.brain.controller import brain_router
+from quivr_core.api.modules.chat.controller import chat_router
+from quivr_core.api.modules.knowledge.controller import knowledge_router
+from quivr_core.api.modules.prompt.controller import prompt_router
+from quivr_core.api.modules.upload.controller import upload_router
+from quivr_core.api.modules.user.controller import user_router
+
+load_dotenv()
+
+app = FastAPI()
+
+
+app.include_router(brain_router)
+app.include_router(chat_router)
+
+app.include_router(upload_router)
+app.include_router(user_router)
+app.include_router(prompt_router)
+app.include_router(knowledge_router)
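
Since main.py only assembles the FastAPI app, it would be served with an ASGI server; a minimal sketch (host and port are arbitrary choices, not taken from this patch):

    import uvicorn

    from quivr_core.api.main import app

    if __name__ == "__main__":
        uvicorn.run(app, host="0.0.0.0", port=5050)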
diff --git a/backend/core/quivr_core/api/models/__init__.py b/backend/core/quivr_core/api/models/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/backend/core/quivr_core/api/models/brains_subscription_invitations.py b/backend/core/quivr_core/api/models/brains_subscription_invitations.py
new file mode 100644
index 000000000000..862610ccd080
--- /dev/null
+++ b/backend/core/quivr_core/api/models/brains_subscription_invitations.py
@@ -0,0 +1,14 @@
+from uuid import UUID
+
+from pydantic import BaseModel, ConfigDict
+
+from quivr_core.api.logger import get_logger
+
+logger = get_logger(__name__)
+
+
+class BrainSubscription(BaseModel):
+    brain_id: UUID
+    email: str
+    rights: str = "Viewer"
+    model_config = ConfigDict(arbitrary_types_allowed=True)
diff --git a/backend/core/quivr_core/api/models/databases/__init__.py b/backend/core/quivr_core/api/models/databases/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/backend/core/quivr_core/api/models/databases/llm_models.py b/backend/core/quivr_core/api/models/databases/llm_models.py
new file mode 100644
index 000000000000..00722817ca77
--- /dev/null
+++ b/backend/core/quivr_core/api/models/databases/llm_models.py
@@ -0,0 +1,13 @@
+from pydantic import BaseModel
+
+
+class LLMModel(BaseModel):
+    """LLM model stored in the database that users are allowed to use.
+
+    Fields describe the model's pricing tier and context-size limits.
+    """
+
+    name: str = "gpt-3.5-turbo-0125"
+    price: int = 1
+    max_input: int = 512
+    max_output: int = 512
diff --git a/backend/core/quivr_core/api/models/databases/repository.py b/backend/core/quivr_core/api/models/databases/repository.py
new file mode 100644
index 000000000000..1b19d64ff552
--- /dev/null
+++ b/backend/core/quivr_core/api/models/databases/repository.py
@@ -0,0 +1,85 @@
+from abc import ABC, abstractmethod
+from datetime import datetime
+from uuid import UUID
+
+from .llm_models import LLMModel
+
+
+class Repository(ABC):
+    @abstractmethod
+    def create_user_daily_usage(self, user_id: UUID, user_email: str, date: datetime):
+        pass
+
+    @abstractmethod
+    def get_user_usage(self, user_id: UUID):
+        pass
+
+    @abstractmethod
+    def get_models(self) -> LLMModel | None:
+        pass
+
+    @abstractmethod
+    def get_user_requests_count_for_month(self, user_id: UUID, date: datetime):
+        pass
+
+    @abstractmethod
+    def update_user_request_count(self, user_id: UUID, date: str):
+        pass
+
+    @abstractmethod
+    def increment_user_request_count(
+        self, user_id: UUID, date: str, current_request_count
+    ):
+        pass
+
+    @abstractmethod
+    def set_file_vectors_ids(self, file_sha1: str):
+        pass
+
+    @abstractmethod
+    def get_brain_vectors_by_brain_id_and_file_sha1(
+        self, brain_id: UUID, file_sha1: str
+    ):
+        pass
+
+    @abstractmethod
+    def create_subscription_invitation(
+        self, brain_id: UUID, user_email: str, rights: str
+    ):
+        pass
+
+    @abstractmethod
+    def update_subscription_invitation(
+        self, brain_id: UUID, user_email: str, rights: str
+    ):
+        pass
+
+    @abstractmethod
+    def get_subscription_invitations_by_brain_id_and_email(
+        self, brain_id: UUID, user_email: str
+    ):
+        pass
+
+    @abstractmethod
+    def get_vectors_by_file_name(self, file_name: str):
+        pass
+
+    @abstractmethod
+    def similarity_search(self, query_embedding, table: str, k: int, threshold: float):
+        pass
+
+    @abstractmethod
+    def update_summary(self, document_id: UUID, summary_id: int):
+        pass
+
+    @abstractmethod
+    def get_vectors_by_batch(self, batch_id: UUID):
+        pass
+
+    @abstractmethod
+    def get_vectors_in_batch(self, batch_ids):
+        pass
+
+    @abstractmethod
+    def get_vectors_by_file_sha1(self, file_sha1):
+        pass
diff --git a/backend/core/quivr_core/api/models/databases/supabase/__init__.py b/backend/core/quivr_core/api/models/databases/supabase/__init__.py
new file mode 100644
index 000000000000..aa1798c69935
--- /dev/null
+++ b/backend/core/quivr_core/api/models/databases/supabase/__init__.py
@@ -0,0 +1,8 @@
+from quivr_core.api.models.databases.supabase.brains_subscription_invitations import (
+    BrainSubscription,
+)
+from quivr_core.api.models.databases.supabase.files import File
+from quivr_core.api.models.databases.supabase.user_usage import UserUsage
+from quivr_core.api.models.databases.supabase.vectors import Vector
+
+__all__ = ["BrainSubscription", "File", "UserUsage", "Vector"]
diff --git a/backend/core/quivr_core/api/models/databases/supabase/brains_subscription_invitations.py b/backend/core/quivr_core/api/models/databases/supabase/brains_subscription_invitations.py
new file mode 100644
index 000000000000..d2ce6a639c66
--- /dev/null
+++ b/backend/core/quivr_core/api/models/databases/supabase/brains_subscription_invitations.py
@@ -0,0 +1,40 @@
+from quivr_core.api.logger import get_logger
+from quivr_core.api.models.databases.repository import Repository
+
+logger = get_logger(__name__)
+
+
+class BrainSubscription(Repository):
+    def __init__(self, supabase_client):
+        self.db = supabase_client
+
+    def create_subscription_invitation(self, brain_id, user_email, rights):
+        logger.info("Creating subscription invitation")
+        response = (
+            self.db.table("brain_subscription_invitations")
+            .insert({"brain_id": str(brain_id), "email": user_email, "rights": rights})
+            .execute()
+        )
+        return response.data
+
+    def update_subscription_invitation(self, brain_id, user_email, rights):
+        logger.info("Updating subscription invitation")
+        response = (
+            self.db.table("brain_subscription_invitations")
+            .update({"rights": rights})
+            .eq("brain_id", str(brain_id))
+            .eq("email", user_email)
+            .execute()
+        )
+        return response.data
+
+    def get_subscription_invitations_by_brain_id_and_email(self, brain_id, user_email):
+        response = (
+            self.db.table("brain_subscription_invitations")
+            .select("*")
+            .eq("brain_id", str(brain_id))
+            .eq("email", user_email)
+            .execute()
+        )
+
+        return response
diff --git a/backend/core/quivr_core/api/models/databases/supabase/files.py b/backend/core/quivr_core/api/models/databases/supabase/files.py
new file mode 100644
index 000000000000..6298b1cc0926
--- /dev/null
+++ b/backend/core/quivr_core/api/models/databases/supabase/files.py
@@ -0,0 +1,28 @@
+from quivr_core.api.models.databases.repository import Repository
+
+
+class File(Repository):
+    def __init__(self, supabase_client):
+        self.db = supabase_client
+
+    def set_file_vectors_ids(self, file_sha1):
+        response = (
+            self.db.table("vectors")
+            .select("id")
+            .filter("file_sha1", "eq", file_sha1)
+            .execute()
+        )
+        return response.data
+
+    def get_brain_vectors_by_brain_id_and_file_sha1(self, brain_id, file_sha1):
+        self.set_file_vectors_ids(file_sha1)
+        # Check if file exists in that brain
+        response = (
+            self.db.table("brains_vectors")
+            .select("brain_id, vector_id")
+            .filter("brain_id", "eq", str(brain_id))
+            .filter("file_sha1", "eq", file_sha1)
+            .execute()
+        )
+
+        return response
diff --git a/backend/core/quivr_core/api/models/databases/supabase/supabase.py b/backend/core/quivr_core/api/models/databases/supabase/supabase.py
new file mode 100644
index 000000000000..5c4f40b30e44
--- /dev/null
+++ b/backend/core/quivr_core/api/models/databases/supabase/supabase.py
@@ -0,0 +1,21 @@
+from quivr_core.api.models.databases.supabase import (
+    BrainSubscription,
+    File,
+    UserUsage,
+    Vector,
+)
+
+
+# TODO: REMOVE THIS CLASS !
+class SupabaseDB(
+    UserUsage,
+    File,
+    BrainSubscription,
+    Vector,
+):
+    def __init__(self, supabase_client):
+        self.db = supabase_client
+        UserUsage.__init__(self, supabase_client)
+        File.__init__(self, supabase_client)
+        BrainSubscription.__init__(self, supabase_client)
+        Vector.__init__(self, supabase_client)
diff --git a/backend/core/quivr_core/api/models/databases/supabase/user_usage.py b/backend/core/quivr_core/api/models/databases/supabase/user_usage.py
new file mode 100644
index 000000000000..c10f834c7752
--- /dev/null
+++ b/backend/core/quivr_core/api/models/databases/supabase/user_usage.py
@@ -0,0 +1,128 @@
+from datetime import datetime, timedelta
+from uuid import UUID
+
+from quivr_core.api.logger import get_logger
+from quivr_core.api.models.databases.repository import Repository
+
+logger = get_logger(__name__)
+
+
+# TODO: change the name of this class because another one already exists
+class UserUsage(Repository):
+    def __init__(self, supabase_client):
+        self.db = supabase_client
+
+    def create_user_daily_usage(
+        self, user_id: UUID, user_email: str, date: datetime, number: int = 1
+    ):
+        return (
+            self.db.table("user_daily_usage")
+            .insert(
+                {
+                    "user_id": str(user_id),
+                    "email": user_email,
+                    "date": date,
+                    "daily_requests_count": number,
+                }
+            )
+            .execute()
+        )
+
+    def get_user_settings(self, user_id):
+        """
+        Fetch the user settings from the database
+        """
+
+        user_settings_response = (
+            self.db.from_("user_settings")
+            .select("*")
+            .filter("user_id", "eq", str(user_id))
+            .execute()
+        ).data
+
+        if len(user_settings_response) == 0:
+            # Create the user settings
+            user_settings_response = (
+                self.db.table("user_settings")
+                .insert({"user_id": str(user_id)})
+                .execute()
+            ).data
+
+            if len(user_settings_response) == 0:
+                raise ValueError("User settings could not be created")
+
+        user_settings = user_settings_response[0]
+
+        return user_settings
+
+    def get_models(self):
+        model_settings_response = (self.db.from_("models").select("*").execute()).data
+        if len(model_settings_response) == 0:
+            raise ValueError("An issue occurred while fetching the model settings")
+        return model_settings_response
+
+    def get_user_monthly(self, user_id):
+        pass
+
+    def get_user_usage(self, user_id):
+        """
+        Fetch the user request stats from the database
+        """
+        requests_stats = (
+            self.db.from_("user_daily_usage")
+            .select("*")
+            .filter("user_id", "eq", user_id)
+            .execute()
+        )
+        return requests_stats.data
+
+    def get_user_requests_count_for_day(self, user_id, date):
+        """
+        Fetch the user request count from the database
+        """
+        response = (
+            self.db.from_("user_daily_usage")
+            .select("daily_requests_count")
+            .filter("user_id", "eq", user_id)
+            .filter("date", "eq", date)
+            .execute()
+        ).data
+
+        if response and len(response) > 0:
+            return response[0]["daily_requests_count"]
+        return 0
+
+    def get_user_requests_count_for_month(self, user_id, date):
+        """
+        Fetch the user request count from the database
+        """
+        date_30_days_ago = (datetime.now() - timedelta(days=30)).strftime("%Y%m%d")
+
+        response = (
+            self.db.from_("user_daily_usage")
+            .select("daily_requests_count")
+            .filter("user_id", "eq", user_id)
+            .filter("date", "gte", date_30_days_ago)
+            .execute()
+        ).data
+
+        if response and len(response) > 0:
+            return sum(row["daily_requests_count"] for row in response)
+        return 0
+
+    def increment_user_request_count(self, user_id, date, number: int = 1):
+        """
+        Increment the user's requests count for a specific day
+        """
+
+        self.update_user_request_count(user_id, daily_requests_count=number, date=date)
+
+    def update_user_request_count(self, user_id, daily_requests_count, date):
+        response = (
+            self.db.table("user_daily_usage")
+            .update({"daily_requests_count": daily_requests_count})
+            .match({"user_id": user_id, "date": date})
+            .execute()
+        )
+
+        return response
diff --git a/backend/core/quivr_core/api/models/databases/supabase/vectors.py b/backend/core/quivr_core/api/models/databases/supabase/vectors.py
new file mode 100644
index 000000000000..b93ee701b542
--- /dev/null
+++ b/backend/core/quivr_core/api/models/databases/supabase/vectors.py
@@ -0,0 +1,76 @@
+from quivr_core.api.models.databases.repository import Repository
+
+
+class Vector(Repository):
+    def __init__(self, supabase_client):
+        self.db = supabase_client
+
+    def get_vectors_by_file_name(self, file_name):
+        response = (
+            self.db.table("vectors")
+            .select(
+                "metadata->>file_name, metadata->>file_size, metadata->>file_extension, metadata->>file_url",
+                "content",
+                "brains_vectors(brain_id,vector_id)",
+            )
+            .match({"metadata->>file_name": file_name})
+            .execute()
+        )
+
+        return response
+
+    def get_vectors_by_file_sha1(self, file_sha1):
+        response = (
+            self.db.table("vectors")
+            .select("id")
+            .filter("file_sha1", "eq", file_sha1)
+            .execute()
+        )
+
+        return response
+
+    # TODO: remove duplicate similarity_search in supabase vector store
+    def similarity_search(self, query_embedding, table, k, threshold):
+        response = self.db.rpc(
+            table,
+            {
+                "query_embedding": query_embedding,
+                "match_count": k,
+                "match_threshold": threshold,
+            },
+        ).execute()
+        return response
+
+    def update_summary(self, document_id, summary_id):
+        return (
+            self.db.table("summaries")
+            .update({"document_id": document_id})
+            .match({"id": summary_id})
+            .execute()
+        )
+
+    def get_vectors_by_batch(self, batch_id):
+        response = (
+            self.db.table("vectors")
+            .select(
+                "name:metadata->>file_name, size:metadata->>file_size",
+                count="exact",
+            )
+            .eq("id", batch_id)
+            .execute()
+        )
+
+        return response
+
+    def get_vectors_in_batch(self, batch_ids):
+        response = (
+            self.db.table("vectors")
+            .select(
+                "name:metadata->>file_name, size:metadata->>file_size",
+                count="exact",
+            )
+            .in_("id", batch_ids)
+            .execute()
+        )
+
+        return response
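
For illustration, similarity_search above passes its table argument straight to the Supabase client's rpc(), so it is really the name of a Postgres function rather than a table. A sketch of a call (function name, embedding size, and threshold are assumptions, not from this patch):

    vector_repo = Vector(supabase_client)
    response = vector_repo.similarity_search(
        query_embedding=[0.0] * 1536,  # dimension depends on the embedding model
        table="match_vectors",         # hypothetical RPC function name
        k=5,
        threshold=0.5,
    )
    matches = response.data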
SupabaseDB: + return get_supabase_db() + + def compute_documents(self, loader_class): + """ + Compute the documents from the file + + Args: + loader_class (class): The class of the loader to use to load the file + """ + logger.info(f"Computing documents from file {self.file_name}") + loader = loader_class(self.tmp_file_path) + documents = loader.load() + + text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder( + chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap + ) + + self.documents = text_splitter.split_documents(documents) + + def set_file_vectors_ids(self): + """ + Set the vectors_ids property with the ids of the vectors + that are associated with the file in the vectors table + """ + self.vectors_ids = self.supabase_db.get_vectors_by_file_sha1( + self.file_sha1 + ).data + + def file_already_exists(self): + """ + Check if file already exists in vectors table + """ + self.set_file_vectors_ids() + + # if the file does not exist in vectors then no need to go check in brains_vectors + if len(self.vectors_ids) == 0: # pyright: ignore reportPrivateUsage=none + return False + + return True + + def file_already_exists_in_brain(self, brain_id): + """ + Check if file already exists in a brain + + Args: + brain_id (str): Brain id + """ + response = self.supabase_db.get_brain_vectors_by_brain_id_and_file_sha1( + brain_id, + self.file_sha1, # type: ignore + ) + + if len(response.data) == 0: + return False + + return True + + def file_is_empty(self): + """ + Check if file is empty by checking if the file pointer is at the beginning of the file + """ + return self.file_size < 1 # pyright: ignore reportPrivateUsage=none + + def link_file_to_brain(self, brain_id): + self.set_file_vectors_ids() + + if self.vectors_ids is None: + return + + brain_vector_service = BrainVectorService(brain_id) + + for vector_id in self.vectors_ids: # pyright: ignore reportPrivateUsage=none + brain_vector_service.create_brain_vector(vector_id["id"], self.file_sha1) diff --git a/backend/core/quivr_core/api/models/files_in_storage.py b/backend/core/quivr_core/api/models/files_in_storage.py new file mode 100644 index 000000000000..05e4d908cfa9 --- /dev/null +++ b/backend/core/quivr_core/api/models/files_in_storage.py @@ -0,0 +1,16 @@ +from uuid import UUID + +from pydantic import BaseModel + + +class FileInStorage(BaseModel): + Id: UUID + Key: str + + @property + def id(self) -> UUID: + return self.Id + + @property + def key(self) -> str: + return self.Key diff --git a/backend/core/quivr_core/api/models/settings.py b/backend/core/quivr_core/api/models/settings.py new file mode 100644 index 000000000000..8425a1144195 --- /dev/null +++ b/backend/core/quivr_core/api/models/settings.py @@ -0,0 +1,189 @@ +from typing import Optional +from uuid import UUID + +from langchain.embeddings.base import Embeddings +from langchain_community.embeddings.ollama import OllamaEmbeddings +from langchain_community.vectorstores.supabase import SupabaseVectorStore +from langchain_openai import OpenAIEmbeddings +from posthog import Posthog +from pydantic_settings import BaseSettings, SettingsConfigDict +from sqlalchemy import Engine, create_engine +from supabase.client import Client, create_client + +from quivr_core.api.logger import get_logger +from quivr_core.api.models.databases.supabase.supabase import SupabaseDB + +logger = get_logger(__name__) + + +class BrainRateLimiting(BaseSettings): + model_config = SettingsConfigDict(validate_default=False) + max_brain_per_user: int = 5 + + +# The `PostHogSettings` class is used 
to initialize and interact with the PostHog analytics service. +class PostHogSettings(BaseSettings): + model_config = SettingsConfigDict(validate_default=False) + posthog_api_key: str | None = None + posthog_api_url: str | None = None + posthog: Posthog | None = None + + def __init__(self, *args, **kwargs): + """ + The function initializes the "posthog" attribute and calls the "initialize_posthog" method. + """ + super().__init__(*args, **kwargs) + self.posthog = None + self.initialize_posthog() + + def initialize_posthog(self): + """ + The function initializes a PostHog client with an API key and URL. + """ + if self.posthog_api_key and self.posthog_api_url: + self.posthog = Posthog( + api_key=self.posthog_api_key, host=self.posthog_api_url + ) + + def log_event(self, user_id: UUID, event_name: str, event_properties: dict): + """ + The function logs an event with a user ID, event name, and event properties using the PostHog + analytics tool. + + :param user_id: The user_id parameter is a UUID (Universally Unique Identifier) that uniquely + identifies a user. It is typically used to track and identify individual users in an application + or system + :type user_id: UUID + :param event_name: The event_name parameter is a string that represents the name or type of the + event that you want to log. It could be something like "user_signed_up", "item_purchased", or + "page_viewed" + :type event_name: str + :param event_properties: The event_properties parameter is a dictionary that contains additional + information or properties related to the event being logged. These properties provide more + context or details about the event and can be used for analysis or filtering purposes + :type event_properties: dict + """ + if self.posthog: + self.posthog.capture(user_id, event_name, event_properties) + + def set_user_properties(self, user_id: UUID, event_name, properties: dict): + """ + The function sets user properties for a given user ID and event name using the PostHog analytics + tool. + + :param user_id: The user_id parameter is a UUID (Universally Unique Identifier) that uniquely + identifies a user. It is used to associate the user with the event and properties being captured + :type user_id: UUID + :param event_name: The `event_name` parameter is a string that represents the name of the event + that you want to capture. It could be something like "user_signed_up" or "item_purchased" + :param properties: The `properties` parameter is a dictionary that contains the user properties + that you want to set. Each key-value pair in the dictionary represents a user property, where + the key is the name of the property and the value is the value you want to set for that property + :type properties: dict + """ + if self.posthog: + self.posthog.capture( + user_id, event=event_name, properties={"$set": properties} + ) + + def set_once_user_properties(self, user_id: UUID, event_name, properties: dict): + """ + The function sets user properties for a specific event, ensuring that the properties are only + set once. + + :param user_id: The user_id parameter is a UUID (Universally Unique Identifier) that uniquely + identifies a user + :type user_id: UUID + :param event_name: The `event_name` parameter is a string that represents the name of the event + that you want to capture. It could be something like "user_signed_up" or "item_purchased" + :param properties: The `properties` parameter is a dictionary that contains the user properties + that you want to set. 
Each key-value pair in the dictionary represents a user property, where
+        the key is the property name and the value is the property value
+        :type properties: dict
+        """
+        if self.posthog:
+            self.posthog.capture(
+                user_id, event=event_name, properties={"$set_once": properties}
+            )
+
+
+class BrainSettings(BaseSettings):
+    model_config = SettingsConfigDict(validate_default=False)
+    pg_database_url: str
+    pg_database_async_url: str
+    openai_api_key: str = ""
+    supabase_url: str = ""
+    supabase_service_key: str = ""
+    resend_api_key: str = "null"
+    resend_email_address: str = "brain@mail.quivr.app"
+    ollama_api_base_url: str | None = None
+    langfuse_public_key: str | None = None
+    langfuse_secret_key: str | None = None
+
+
+class ResendSettings(BaseSettings):
+    model_config = SettingsConfigDict(validate_default=False)
+    resend_api_key: str = "null"
+
+
+# Global singletons for the Supabase client, database, engines and embedding service
+_supabase_client: Optional[Client] = None
+_supabase_db: Optional[SupabaseDB] = None
+_db_engine: Optional[Engine] = None
+_async_db_engine: Optional[Engine] = None
+_embedding_service = None
+
+settings = BrainSettings()  # type: ignore
+
+
+def get_pg_database_engine():
+    global _db_engine
+    if _db_engine is None:
+        logger.info("Creating Postgres DB engine")
+        _db_engine = create_engine(settings.pg_database_url, pool_pre_ping=True)
+    return _db_engine
+
+
+def get_pg_database_async_engine():
+    # A dedicated global so the sync and async engines never shadow each other.
+    # NOTE: if pg_database_async_url targets an async driver (e.g.
+    # postgresql+asyncpg), sqlalchemy.ext.asyncio.create_async_engine should be
+    # used here instead of create_engine.
+    global _async_db_engine
+    if _async_db_engine is None:
+        logger.info("Creating async Postgres DB engine")
+        _async_db_engine = create_engine(
+            settings.pg_database_async_url, pool_pre_ping=True
+        )
+    return _async_db_engine
+
+
+def get_supabase_client() -> Client:
+    global _supabase_client
+    if _supabase_client is None:
+        logger.info("Creating Supabase client")
+        _supabase_client = create_client(
+            settings.supabase_url, settings.supabase_service_key
+        )
+    return _supabase_client
+
+
+def get_supabase_db() -> SupabaseDB:
+    global _supabase_db
+    if _supabase_db is None:
+        logger.info("Creating Supabase DB")
+        _supabase_db = SupabaseDB(get_supabase_client())
+    return _supabase_db
+
+
+def get_embedding_client() -> Embeddings:
+    # Cache the embedding client like the other singletons above
+    global _embedding_service
+    if _embedding_service is None:
+        if settings.ollama_api_base_url:
+            _embedding_service = OllamaEmbeddings(
+                base_url=settings.ollama_api_base_url,
+            )  # pyright: ignore reportPrivateUsage=none
+        else:
+            _embedding_service = OpenAIEmbeddings()  # pyright: ignore reportPrivateUsage=none
+    return _embedding_service
+
+
+def get_documents_vector_store() -> SupabaseVectorStore:
+    embeddings = get_embedding_client()
+    supabase_client: Client = get_supabase_client()
+    documents_vector_store = SupabaseVectorStore(
+        supabase_client, embeddings, table_name="vectors"
+    )
+    return documents_vector_store
diff --git a/backend/core/quivr_core/api/models/sqlalchemy_repository.py b/backend/core/quivr_core/api/models/sqlalchemy_repository.py
new file mode 100644
index 000000000000..7b295187973a
--- /dev/null
+++ b/backend/core/quivr_core/api/models/sqlalchemy_repository.py
@@ -0,0 +1,73 @@
+from datetime import datetime
+from uuid import uuid4
+
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer, String
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import relationship
+
+Base = declarative_base()
+
+
+class User(Base):
+    __tablename__ = "users"
+
+    user_id = Column(String, primary_key=True)
+    email = Column(String)
+    date = Column(DateTime)
+    daily_requests_count = Column(Integer)
+
+
+class Brain(Base):
+    __tablename__ = "brains"
+
+    brain_id = Column(Integer, primary_key=True)
+    name = Column(String)
+    users = 
relationship("BrainUser", back_populates="brain") + vectors = relationship("BrainVector", back_populates="brain") + + +class BrainUser(Base): + __tablename__ = "brains_users" + + id = Column(Integer, primary_key=True) + user_id = Column(Integer, ForeignKey("users.user_id")) + brain_id = Column(Integer, ForeignKey("brains.brain_id")) + rights = Column(String) + + user = relationship("User") + brain = relationship("Brain", back_populates="users") + + +class BrainVector(Base): + __tablename__ = "brains_vectors" + + vector_id = Column(String, primary_key=True, default=lambda: str(uuid4())) + brain_id = Column(Integer, ForeignKey("brains.brain_id")) + file_sha1 = Column(String) + + brain = relationship("Brain", back_populates="vectors") + + +class BrainSubscriptionInvitation(Base): + __tablename__ = "brain_subscription_invitations" + + id = Column(Integer, primary_key=True) # Assuming an integer primary key named 'id' + brain_id = Column(String, ForeignKey("brains.brain_id")) + email = Column(String, ForeignKey("users.email")) + rights = Column(String) + + brain = relationship("Brain") + user = relationship("User", foreign_keys=[email]) + + +class ApiKey(Base): + __tablename__ = "api_keys" + + key_id = Column(String, primary_key=True, default=lambda: str(uuid4())) + user_id = Column(Integer, ForeignKey("users.user_id")) + api_key = Column(String, unique=True) + creation_time = Column(DateTime, default=datetime.utcnow) + is_active = Column(Boolean, default=True) + deleted_time = Column(DateTime, nullable=True) + + user = relationship("User") diff --git a/backend/core/quivr_core/api/modules/__init__.py b/backend/core/quivr_core/api/modules/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/brain/__init__.py b/backend/core/quivr_core/api/modules/brain/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/brain/api_brain_qa.py b/backend/core/quivr_core/api/modules/brain/api_brain_qa.py new file mode 100644 index 000000000000..87ab8430fa75 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/api_brain_qa.py @@ -0,0 +1,500 @@ +import json +from typing import Optional +from uuid import UUID + +import jq +import requests +from fastapi import HTTPException +from litellm import completion + +from quivr_core.api.logger import get_logger +from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA +from quivr_core.api.modules.brain.qa_interface import QAInterface +from quivr_core.api.modules.brain.service.brain_service import BrainService +from quivr_core.api.modules.brain.service.call_brain_api import call_brain_api +from quivr_core.api.modules.brain.service.get_api_brain_definition_as_json_schema import ( + get_api_brain_definition_as_json_schema, +) +from quivr_core.api.modules.chat.dto.chats import ChatQuestion +from quivr_core.api.modules.chat.dto.inputs import CreateChatHistory +from quivr_core.api.modules.chat.dto.outputs import GetChatHistoryOutput +from quivr_core.api.modules.chat.service.chat_service import ChatService +from quivr_core.api.modules.dependencies import get_service + +brain_service = BrainService() +chat_service = get_service(ChatService)() + +logger = get_logger(__name__) + + +class UUIDEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, UUID): + # if the object is uuid, we simply return the value of uuid + return str(obj) + return super().default(obj) + + +class APIBrainQA(KnowledgeBrainQA, QAInterface): + 
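+    # High-level flow: the brain's API definition is exposed to the LLM as an
+    # OpenAI-style function schema (see get_api_brain_definition_as_json_schema),
+    # and whenever the model emits a function call, call_brain_api() performs the
+    # underlying HTTP request and feeds the result back into the conversation.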
user_id: UUID + raw: bool = False + jq_instructions: Optional[str] = None + + def __init__( + self, + model: str, + brain_id: str, + chat_id: str, + streaming: bool = False, + prompt_id: Optional[UUID] = None, + raw: bool = False, + jq_instructions: Optional[str] = None, + **kwargs, + ): + user_id = kwargs.get("user_id") + if not user_id: + raise HTTPException(status_code=400, detail="Cannot find user id") + + super().__init__( + model=model, + brain_id=brain_id, + chat_id=chat_id, + streaming=streaming, + prompt_id=prompt_id, + **kwargs, + ) + self.user_id = user_id + self.raw = raw + self.jq_instructions = jq_instructions + + def get_api_call_response_as_text( + self, method, api_url, params, search_params, secrets + ) -> str: + headers = {} + + api_url_with_search_params = api_url + if search_params: + api_url_with_search_params += "?" + for search_param in search_params: + api_url_with_search_params += ( + f"{search_param}={search_params[search_param]}&" + ) + + for secret in secrets: + headers[secret] = secrets[secret] + + try: + if method in ["GET", "DELETE"]: + response = requests.request( + method, + url=api_url_with_search_params, + params=params or None, + headers=headers or None, + ) + elif method in ["POST", "PUT", "PATCH"]: + response = requests.request( + method, + url=api_url_with_search_params, + json=params or None, + headers=headers or None, + ) + else: + raise ValueError(f"Invalid method: {method}") + + return response.text + + except Exception as e: + logger.error(f"Error calling API: {e}") + return None + + def log_steps(self, message: str, type: str): + if "api" not in self.metadata: + self.metadata["api"] = {} + if "steps" not in self.metadata["api"]: + self.metadata["api"]["steps"] = [] + self.metadata["api"]["steps"].append( + { + "number": len(self.metadata["api"]["steps"]), + "type": type, + "message": message, + } + ) + + async def make_completion( + self, + messages, + functions, + brain_id: UUID, + recursive_count=0, + should_log_steps=True, + ) -> str | None: + if recursive_count > 5: + self.log_steps( + "The assistant is having issues and took more than 5 calls to the API. 
Please try again later or give another instruction.",
+                "error",
+            )
+            return
+
+        if "api" not in self.metadata:
+            self.metadata["api"] = {}
+        if "raw_enabled" not in self.metadata["api"]:
+            self.metadata["api"]["raw_enabled"] = self.raw
+
+        response = completion(
+            model=self.model,
+            temperature=self.temperature,
+            max_tokens=self.max_tokens,
+            messages=messages,
+            functions=functions,
+            stream=True,
+            function_call="auto",
+        )
+
+        function_call = {
+            "name": None,
+            "arguments": "",
+        }
+        for chunk in response:
+            finish_reason = chunk.choices[0].finish_reason
+            if finish_reason == "stop":
+                self.log_steps("Quivr has finished", "info")
+                break
+            if (
+                "function_call" in chunk.choices[0].delta
+                and chunk.choices[0].delta["function_call"]
+            ):
+                if chunk.choices[0].delta["function_call"].name:
+                    function_call["name"] = chunk.choices[0].delta["function_call"].name
+                if chunk.choices[0].delta["function_call"].arguments:
+                    function_call["arguments"] += (
+                        chunk.choices[0].delta["function_call"].arguments
+                    )
+
+            elif finish_reason == "function_call":
+                try:
+                    arguments = json.loads(function_call["arguments"])
+
+                except Exception:
+                    # The accumulated arguments may be malformed JSON; log the raw payload
+                    self.log_steps(
+                        f"Issues with {function_call['arguments']}", "error"
+                    )
+                    arguments = {}
+
+                self.log_steps(f"Calling {brain_id} with arguments {arguments}", "info")
+
+                try:
+                    api_call_response = call_brain_api(
+                        brain_id=brain_id,
+                        user_id=self.user_id,
+                        arguments=arguments,
+                    )
+                except Exception as e:
+                    logger.info(f"Error while calling API: {e}")
+                    api_call_response = f"Error while calling API: {e}"
+                function_name = function_call["name"]
+                self.log_steps("Quivr has called the API", "info")
+                messages.append(
+                    {
+                        "role": "function",
+                        "name": function_call["name"],
+                        "content": f"The function {function_name} was called and gave the following answer: (data from function) {api_call_response} (end of data from function). Don't call this function again unless there was an error, or it is strictly necessary and specifically requested by the user. If there was an error, display it to the user verbatim.",
+                    }
+                )
+
+                # Assumes the API returned JSON; an error string from the except branch above would fail to parse here
+                self.metadata["api"]["raw_response"] = json.loads(api_call_response)
+                if self.raw:
+                    # Yield the raw response in a format that can then be caught by the generate_stream function
+                    response_to_yield = f"````raw_response: {api_call_response}````"
+
+                    yield response_to_yield
+                    return
+
+                async for value in self.make_completion(
+                    messages=messages,
+                    functions=functions,
+                    brain_id=brain_id,
+                    recursive_count=recursive_count + 1,
+                    should_log_steps=should_log_steps,
+                ):
+                    yield value
+
+            else:
+                if (
+                    hasattr(chunk.choices[0], "delta")
+                    and chunk.choices[0].delta
+                    and hasattr(chunk.choices[0].delta, "content")
+                ):
+                    content = chunk.choices[0].delta.content
+                    yield content
+                else:  # pragma: no cover
+                    yield "**...**"
+                    break
+
+    async def generate_stream(
+        self,
+        chat_id: UUID,
+        question: ChatQuestion,
+        save_answer: bool = True,
+        should_log_steps: Optional[bool] = True,
+    ):
+        brain = brain_service.get_brain_by_id(self.brain_id)
+
+        if not brain:
+            raise HTTPException(status_code=404, detail="Brain not found")
+
+        prompt_content = "You are a helpful assistant that can access functions to help answer questions. If information is missing from the question, you can ask the user follow-up questions to gather it. Once all the information is available, you can call the function to get the answer."
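+        # Sketch of the payload assembled below (shapes illustrative; the real
+        # history comes from chat_service):
+        #   messages = [
+        #       {"role": "system", "content": prompt_content},
+        #       {"role": "user", "content": "<past user message>"},
+        #       {"role": "assistant", "content": "<past assistant answer>"},
+        #       {"role": "user", "content": question.question},
+        #   ]
+        # plus one function schema built from the brain's API definition, so the
+        # model can decide when to call the brain's endpoint.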
+
+        if self.prompt_to_use:
+            prompt_content += self.prompt_to_use.content
+
+        messages = [{"role": "system", "content": prompt_content}]
+
+        history = chat_service.get_chat_history(self.chat_id)
+
+        for message in history:
+            formatted_message = [
+                {"role": "user", "content": message.user_message},
+                {"role": "assistant", "content": message.assistant},
+            ]
+            messages.extend(formatted_message)
+
+        messages.append({"role": "user", "content": question.question})
+
+        if save_answer:
+            streamed_chat_history = chat_service.update_chat_history(
+                CreateChatHistory(
+                    **{
+                        "chat_id": chat_id,
+                        "user_message": question.question,
+                        "assistant": "",
+                        "brain_id": self.brain_id,
+                        "prompt_id": self.prompt_to_use_id,
+                    }
+                )
+            )
+            streamed_chat_history = GetChatHistoryOutput(
+                **{
+                    "chat_id": str(chat_id),
+                    "message_id": streamed_chat_history.message_id,
+                    "message_time": streamed_chat_history.message_time,
+                    "user_message": question.question,
+                    "assistant": "",
+                    "prompt_title": (
+                        self.prompt_to_use.title if self.prompt_to_use else None
+                    ),
+                    "brain_name": brain.name if brain else None,
+                    "brain_id": str(self.brain_id),
+                    "metadata": self.metadata,
+                }
+            )
+        else:
+            streamed_chat_history = GetChatHistoryOutput(
+                **{
+                    "chat_id": str(chat_id),
+                    "message_id": None,
+                    "message_time": None,
+                    "user_message": question.question,
+                    "assistant": "",
+                    "prompt_title": (
+                        self.prompt_to_use.title if self.prompt_to_use else None
+                    ),
+                    "brain_name": brain.name if brain else None,
+                    "brain_id": str(self.brain_id),
+                    "metadata": self.metadata,
+                }
+            )
+        response_tokens = []
+        async for value in self.make_completion(
+            messages=messages,
+            functions=[get_api_brain_definition_as_json_schema(brain)],
+            brain_id=self.brain_id,
+            should_log_steps=should_log_steps,
+        ):
+            # Check whether the value is a raw response
+            if value.startswith("````raw_response:"):
+                raw_value_cleaned = value.replace("````raw_response: ", "").replace(
+                    "````", ""
+                )
+                logger.info(f"Raw response: {raw_value_cleaned}")
+                if self.jq_instructions:
+                    json_raw_value_cleaned = json.loads(raw_value_cleaned)
+                    raw_value_cleaned = (
+                        jq.compile(self.jq_instructions)
+                        .input_value(json_raw_value_cleaned)
+                        .first()
+                    )
+                streamed_chat_history.assistant = raw_value_cleaned
+                response_tokens.append(raw_value_cleaned)
+                yield f"data: {json.dumps(streamed_chat_history.dict())}"
+            else:
+                streamed_chat_history.assistant = value
+                response_tokens.append(value)
+                yield f"data: {json.dumps(streamed_chat_history.dict())}"
+
+        if save_answer:
+            chat_service.update_message_by_id(
+                message_id=str(streamed_chat_history.message_id),
+                user_message=question.question,
+                assistant="".join(str(token) for token in response_tokens),
+                metadata=self.metadata,
+            )
+
+    def make_completion_without_streaming(
+        self,
+        messages,
+        functions,
+        brain_id: UUID,
+        recursive_count=0,
+        should_log_steps=False,
+        # Accepted for parity with generate_answer's call site; the
+        # non-streaming path does not post-process raw output
+        raw: bool = True,
+    ):
+        if recursive_count > 5:
+            print(
+                "The assistant is having issues and took more than 5 calls to the API. Please try again later or give another instruction."
+            )
+            return
+
+        if should_log_steps:
+            print("🧠🧠")
+
+        response = completion(
+            model=self.model,
+            temperature=self.temperature,
+            max_tokens=self.max_tokens,
+            messages=messages,
+            functions=functions,
+            stream=False,
+            function_call="auto",
+        )
+
+        response_message = response.choices[0].message
+        finish_reason = response.choices[0].finish_reason
+
+        if finish_reason == "function_call":
+            function_call = response_message.function_call
+            try:
+                arguments = json.loads(function_call.arguments)
+
+            except Exception:
+                arguments = {}
+
+            if should_log_steps:
+                self.log_steps(f"Calling {brain_id} with arguments {arguments}", "info")
+
+            try:
+                api_call_response = call_brain_api(
+                    brain_id=brain_id,
+                    user_id=self.user_id,
+                    arguments=arguments,
+                )
+            except Exception as e:
+                raise HTTPException(
+                    status_code=400,
+                    detail=f"Error while calling API: {e}",
+                )
+
+            function_name = function_call.name
+            messages.append(
+                {
+                    "role": "function",
+                    "name": function_call.name,
+                    "content": f"The function {function_name} was called and gave the following answer: (data from function) {api_call_response} (end of data from function). Don't call this function again unless there was an error, or it is strictly necessary and specifically requested by the user.",
+                }
+            )
+
+            return self.make_completion_without_streaming(
+                messages=messages,
+                functions=functions,
+                brain_id=brain_id,
+                recursive_count=recursive_count + 1,
+                should_log_steps=should_log_steps,
+            )
+
+        if finish_reason == "stop":
+            return response_message
+
+        else:
+            print("Never ending completion")
+
+    def generate_answer(
+        self,
+        chat_id: UUID,
+        question: ChatQuestion,
+        save_answer: bool = True,
+        raw: bool = True,
+    ):
+        if not self.brain_id:
+            raise HTTPException(
+                status_code=400, detail="No brain id provided in the question"
+            )
+
+        brain = brain_service.get_brain_by_id(self.brain_id)
+
+        if not brain:
+            raise HTTPException(status_code=404, detail="Brain not found")
+
+        prompt_content = "You are a helpful assistant that can access functions to help answer questions. If information is missing from the question, you can ask the user follow-up questions to gather it. Once all the information is available, you can call the function to get the answer."
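+        # The non-streaming path below mirrors make_completion: completion() is
+        # given the brain's function schema; while finish_reason is
+        # "function_call", call_brain_api() runs and its output is appended as a
+        # "function" message, recursing (up to 5 times) until finish_reason is
+        # "stop".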
+ + if self.prompt_to_use: + prompt_content += self.prompt_to_use.content + + messages = [{"role": "system", "content": prompt_content}] + + history = chat_service.get_chat_history(self.chat_id) + + for message in history: + formatted_message = [ + {"role": "user", "content": message.user_message}, + {"role": "assistant", "content": message.assistant}, + ] + messages.extend(formatted_message) + + messages.append({"role": "user", "content": question.question}) + + response = self.make_completion_without_streaming( + messages=messages, + functions=[get_api_brain_definition_as_json_schema(brain)], + brain_id=self.brain_id, + should_log_steps=False, + raw=raw, + ) + + answer = response.content + if save_answer: + new_chat = chat_service.update_chat_history( + CreateChatHistory( + **{ + "chat_id": chat_id, + "user_message": question.question, + "assistant": answer, + "brain_id": self.brain_id, + "prompt_id": self.prompt_to_use_id, + } + ) + ) + + return GetChatHistoryOutput( + **{ + "chat_id": chat_id, + "user_message": question.question, + "assistant": answer, + "message_time": new_chat.message_time, + "prompt_title": ( + self.prompt_to_use.title if self.prompt_to_use else None + ), + "brain_name": brain.name if brain else None, + "message_id": new_chat.message_id, + "metadata": self.metadata, + "brain_id": str(self.brain_id), + } + ) + return GetChatHistoryOutput( + **{ + "chat_id": chat_id, + "user_message": question.question, + "assistant": answer, + "message_time": "123", + "prompt_title": None, + "brain_name": brain.name, + "message_id": None, + "metadata": self.metadata, + "brain_id": str(self.brain_id), + } + ) diff --git a/backend/core/quivr_core/api/modules/brain/composite_brain_qa.py b/backend/core/quivr_core/api/modules/brain/composite_brain_qa.py new file mode 100644 index 000000000000..25e460a5617a --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/composite_brain_qa.py @@ -0,0 +1,593 @@ +import json +from typing import Optional +from uuid import UUID + +from fastapi import HTTPException +from litellm import completion + +from quivr_core.api.logger import get_logger +from quivr_core.api.modules.brain.api_brain_qa import APIBrainQA +from quivr_core.api.modules.brain.entity.brain_entity import BrainEntity, BrainType +from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA +from quivr_core.api.modules.brain.qa_headless import HeadlessQA +from quivr_core.api.modules.brain.service.brain_service import BrainService +from quivr_core.api.modules.chat.dto.chats import ChatQuestion +from quivr_core.api.modules.chat.dto.inputs import CreateChatHistory +from quivr_core.api.modules.chat.dto.outputs import ( + BrainCompletionOutput, + CompletionMessage, + CompletionResponse, + GetChatHistoryOutput, +) +from quivr_core.api.modules.chat.service.chat_service import ChatService +from quivr_core.api.modules.dependencies import get_service + +brain_service = BrainService() +chat_service = get_service(ChatService)() + +logger = get_logger(__name__) + + +def format_brain_to_tool(brain): + return { + "type": "function", + "function": { + "name": str(brain.id), + "description": brain.description, + "parameters": { + "type": "object", + "properties": { + "question": { + "type": "string", + "description": "Question to ask the brain", + }, + }, + "required": ["question"], + }, + }, + } + + +class CompositeBrainQA( + KnowledgeBrainQA, +): + user_id: UUID + + def __init__( + self, + model: str, + brain_id: str, + chat_id: str, + streaming: bool = False, + prompt_id: 
Optional[UUID] = None, + **kwargs, + ): + user_id = kwargs.get("user_id") + if not user_id: + raise HTTPException(status_code=400, detail="Cannot find user id") + + super().__init__( + model=model, + brain_id=brain_id, + chat_id=chat_id, + streaming=streaming, + prompt_id=prompt_id, + **kwargs, + ) + self.user_id = user_id + + def get_answer_generator_from_brain_type(self, brain: BrainEntity): + if brain.brain_type == BrainType.composite: + return self.generate_answer + elif brain.brain_type == BrainType.api: + return APIBrainQA( + brain_id=str(brain.id), + chat_id=self.chat_id, + model=self.model, + max_tokens=self.max_tokens, + temperature=self.temperature, + streaming=self.streaming, + prompt_id=self.prompt_id, + user_id=str(self.user_id), + raw=brain.raw, + jq_instructions=brain.jq_instructions, + ).generate_answer + elif brain.brain_type == BrainType.doc: + return KnowledgeBrainQA( + brain_id=str(brain.id), + chat_id=self.chat_id, + max_tokens=self.max_tokens, + temperature=self.temperature, + streaming=self.streaming, + prompt_id=self.prompt_id, + ).generate_answer + + def generate_answer( + self, chat_id: UUID, question: ChatQuestion, save_answer: bool + ) -> str: + brain = brain_service.get_brain_by_id(question.brain_id) + + connected_brains = brain_service.get_connected_brains(self.brain_id) + + if not connected_brains: + response = HeadlessQA( + chat_id=chat_id, + model=self.model, + max_tokens=self.max_tokens, + temperature=self.temperature, + streaming=self.streaming, + prompt_id=self.prompt_id, + ).generate_answer(chat_id, question, save_answer=False) + if save_answer: + new_chat = chat_service.update_chat_history( + CreateChatHistory( + **{ + "chat_id": chat_id, + "user_message": question.question, + "assistant": response.assistant, + "brain_id": question.brain_id, + "prompt_id": self.prompt_to_use_id, + } + ) + ) + return GetChatHistoryOutput( + **{ + "chat_id": chat_id, + "user_message": question.question, + "assistant": response.assistant, + "message_time": new_chat.message_time, + "prompt_title": ( + self.prompt_to_use.title if self.prompt_to_use else None + ), + "brain_name": brain.name, + "message_id": new_chat.message_id, + "brain_id": str(brain.id), + } + ) + return GetChatHistoryOutput( + **{ + "chat_id": chat_id, + "user_message": question.question, + "assistant": response.assistant, + "message_time": None, + "prompt_title": ( + self.prompt_to_use.title if self.prompt_to_use else None + ), + "brain_name": brain.name, + "message_id": None, + "brain_id": str(brain.id), + } + ) + + tools = [] + available_functions = {} + + connected_brains_details = {} + for connected_brain_id in connected_brains: + connected_brain = brain_service.get_brain_by_id(connected_brain_id) + if connected_brain is None: + continue + + tools.append(format_brain_to_tool(connected_brain)) + + available_functions[connected_brain_id] = ( + self.get_answer_generator_from_brain_type(connected_brain) + ) + + connected_brains_details[str(connected_brain.id)] = connected_brain + + CHOOSE_BRAIN_FROM_TOOLS_PROMPT = ( + "Based on the provided user content, find the most appropriate tools to answer" + + "If you can't find any tool to answer and only then, and if you can answer without using any tool. 
In that case, let the user know that you are not using any particular brain (i.e tool) " + ) + + messages = [{"role": "system", "content": CHOOSE_BRAIN_FROM_TOOLS_PROMPT}] + + history = chat_service.get_chat_history(self.chat_id) + + for message in history: + formatted_message = [ + {"role": "user", "content": message.user_message}, + {"role": "assistant", "content": message.assistant}, + ] + messages.extend(formatted_message) + + messages.append({"role": "user", "content": question.question}) + + response = completion( + model="gpt-3.5-turbo-0125", + messages=messages, + tools=tools, + tool_choice="auto", + ) + + brain_completion_output = self.make_recursive_tool_calls( + messages, + question, + chat_id, + tools, + available_functions, + recursive_count=0, + last_completion_response=response.choices[0], + ) + + if brain_completion_output: + answer = brain_completion_output.response.message.content + new_chat = None + if save_answer: + new_chat = chat_service.update_chat_history( + CreateChatHistory( + **{ + "chat_id": chat_id, + "user_message": question.question, + "assistant": answer, + "brain_id": question.brain_id, + "prompt_id": self.prompt_to_use_id, + } + ) + ) + return GetChatHistoryOutput( + **{ + "chat_id": chat_id, + "user_message": question.question, + "assistant": brain_completion_output.response.message.content, + "message_time": new_chat.message_time if new_chat else None, + "prompt_title": ( + self.prompt_to_use.title if self.prompt_to_use else None + ), + "brain_name": brain.name if brain else None, + "message_id": new_chat.message_id if new_chat else None, + "brain_id": str(brain.id) if brain else None, + } + ) + + def make_recursive_tool_calls( + self, + messages, + question, + chat_id, + tools=[], + available_functions={}, + recursive_count=0, + last_completion_response: CompletionResponse = None, + ): + if recursive_count > 5: + print( + "The assistant is having issues and took more than 5 calls to the tools. Please try again later or an other instruction." + ) + return None + + finish_reason = last_completion_response.finish_reason + if finish_reason == "stop": + messages.append(last_completion_response.message) + return BrainCompletionOutput( + **{ + "messages": messages, + "question": question.question, + "response": last_completion_response, + } + ) + + if finish_reason == "tool_calls": + response_message: CompletionMessage = last_completion_response.message + tool_calls = response_message.tool_calls + + messages.append(response_message) + + if ( + len(tool_calls) == 0 + or tool_calls is None + or len(available_functions) == 0 + ): + return + + for tool_call in tool_calls: + function_name = tool_call.function.name + function_to_call = available_functions[function_name] + function_args = json.loads(tool_call.function.arguments) + question = ChatQuestion( + question=function_args["question"], brain_id=function_name + ) + + # TODO: extract chat_id from generate_answer function of XBrainQA + function_response = function_to_call( + chat_id=chat_id, + question=question, + save_answer=False, + ) + messages.append( + { + "tool_call_id": tool_call.id, + "role": "tool", + "name": function_name, + "content": function_response.assistant, + } + ) + + PROMPT_2 = "If initial question can be answered by our conversation messages, then give an answer and end the conversation." 
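+            # Each connected brain's answer has just been appended as a "tool"
+            # message, shaped roughly like (values illustrative):
+            #   {"tool_call_id": "call_abc", "role": "tool",
+            #    "name": "<connected brain_id>", "content": "<brain answer>"}
+            # PROMPT_2 then nudges the model to either answer or call more tools.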
+ + messages.append({"role": "system", "content": PROMPT_2}) + + for idx, msg in enumerate(messages): + logger.info( + f"Message {idx}: Role - {msg['role']}, Content - {msg['content']}" + ) + + response_after_tools_answers = completion( + model="gpt-3.5-turbo-0125", + messages=messages, + tools=tools, + tool_choice="auto", + ) + + return self.make_recursive_tool_calls( + messages, + question, + chat_id, + tools, + available_functions, + recursive_count=recursive_count + 1, + last_completion_response=response_after_tools_answers.choices[0], + ) + + async def generate_stream( + self, + chat_id: UUID, + question: ChatQuestion, + save_answer: bool, + should_log_steps: Optional[bool] = True, + ): + brain = brain_service.get_brain_by_id(question.brain_id) + if save_answer: + streamed_chat_history = chat_service.update_chat_history( + CreateChatHistory( + **{ + "chat_id": chat_id, + "user_message": question.question, + "assistant": "", + "brain_id": question.brain_id, + "prompt_id": self.prompt_to_use_id, + } + ) + ) + streamed_chat_history = GetChatHistoryOutput( + **{ + "chat_id": str(chat_id), + "message_id": streamed_chat_history.message_id, + "message_time": streamed_chat_history.message_time, + "user_message": question.question, + "assistant": "", + "prompt_title": ( + self.prompt_to_use.title if self.prompt_to_use else None + ), + "brain_name": brain.name if brain else None, + "brain_id": str(brain.id) if brain else None, + } + ) + else: + streamed_chat_history = GetChatHistoryOutput( + **{ + "chat_id": str(chat_id), + "message_id": None, + "message_time": None, + "user_message": question.question, + "assistant": "", + "prompt_title": ( + self.prompt_to_use.title if self.prompt_to_use else None + ), + "brain_name": brain.name if brain else None, + "brain_id": str(brain.id) if brain else None, + } + ) + + connected_brains = brain_service.get_connected_brains(self.brain_id) + + if not connected_brains: + headlesss_answer = HeadlessQA( + chat_id=chat_id, + model=self.model, + max_tokens=self.max_tokens, + temperature=self.temperature, + streaming=self.streaming, + prompt_id=self.prompt_id, + ).generate_stream(chat_id, question) + + response_tokens = [] + async for value in headlesss_answer: + streamed_chat_history.assistant = value + response_tokens.append(value) + yield f"data: {json.dumps(streamed_chat_history.dict())}" + + if save_answer: + chat_service.update_message_by_id( + message_id=str(streamed_chat_history.message_id), + user_message=question.question, + assistant="".join(response_tokens), + ) + + tools = [] + available_functions = {} + + connected_brains_details = {} + for brain_id in connected_brains: + brain = brain_service.get_brain_by_id(brain_id) + if brain == None: + continue + + tools.append(format_brain_to_tool(brain)) + + available_functions[brain_id] = self.get_answer_generator_from_brain_type( + brain + ) + + connected_brains_details[str(brain.id)] = brain + + CHOOSE_BRAIN_FROM_TOOLS_PROMPT = ( + "Based on the provided user content, find the most appropriate tools to answer" + + "If you can't find any tool to answer and only then, and if you can answer without using any tool. 
In that case, let the user know that you are not using any particular brain (i.e tool) " + ) + + messages = [{"role": "system", "content": CHOOSE_BRAIN_FROM_TOOLS_PROMPT}] + + history = chat_service.get_chat_history(self.chat_id) + + for message in history: + formatted_message = [ + {"role": "user", "content": message.user_message}, + {"role": "assistant", "content": message.assistant}, + ] + if message.assistant is None: + print(message) + messages.extend(formatted_message) + + messages.append({"role": "user", "content": question.question}) + + initial_response = completion( + model="gpt-3.5-turbo-0125", + stream=True, + messages=messages, + tools=tools, + tool_choice="auto", + ) + + response_tokens = [] + tool_calls_aggregate = [] + for chunk in initial_response: + content = chunk.choices[0].delta.content + if content is not None: + # Need to store it ? + streamed_chat_history.assistant = content + response_tokens.append(chunk.choices[0].delta.content) + + if save_answer: + yield f"data: {json.dumps(streamed_chat_history.dict())}" + else: + yield f"🧠<' {chunk.choices[0].delta.content}" + + if ( + "tool_calls" in chunk.choices[0].delta + and chunk.choices[0].delta.tool_calls is not None + ): + tool_calls = chunk.choices[0].delta.tool_calls + for tool_call in tool_calls: + id = tool_call.id + name = tool_call.function.name + if id and name: + tool_calls_aggregate += [ + { + "id": tool_call.id, + "function": { + "arguments": tool_call.function.arguments, + "name": tool_call.function.name, + }, + "type": "function", + } + ] + + else: + try: + tool_calls_aggregate[tool_call.index]["function"][ + "arguments" + ] += tool_call.function.arguments + except IndexError: + print("TOOL_CALL_INDEX error", tool_call.index) + print("TOOL_CALLS_AGGREGATE error", tool_calls_aggregate) + + finish_reason = chunk.choices[0].finish_reason + + if finish_reason == "stop": + if save_answer: + chat_service.update_message_by_id( + message_id=str(streamed_chat_history.message_id), + user_message=question.question, + assistant="".join( + [ + token + for token in response_tokens + if not token.startswith("🧠<") + ] + ), + ) + break + + if finish_reason == "tool_calls": + messages.append( + { + "role": "assistant", + "tool_calls": tool_calls_aggregate, + "content": None, + } + ) + for tool_call in tool_calls_aggregate: + function_name = tool_call["function"]["name"] + queried_brain = connected_brains_details[function_name] + function_to_call = available_functions[function_name] + function_args = json.loads(tool_call["function"]["arguments"]) + print("function_args", function_args["question"]) + question = ChatQuestion( + question=function_args["question"], brain_id=queried_brain.id + ) + + # yield f"🧠< Querying the brain {queried_brain.name} with the following arguments: {function_args} >🧠", + + print( + f"🧠< Querying the brain {queried_brain.name} with the following arguments: {function_args}", + ) + function_response = function_to_call( + chat_id=chat_id, + question=question, + save_answer=False, + ) + + messages.append( + { + "tool_call_id": tool_call["id"], + "role": "tool", + "name": function_name, + "content": function_response.assistant, + } + ) + + print("messages", messages) + + PROMPT_2 = "If the last user's question can be answered by our conversation messages since then, then give an answer and end the conversation. If you need to ask question to the user to gather more information and give a more accurate answer, then ask the question and wait for the user's answer." 
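+                # tool_calls_aggregate is needed because streamed chunks deliver
+                # a tool call piecewise: the first chunk carries the call's id
+                # and function name, and subsequent chunks append fragments of
+                # the JSON "arguments" string, which are concatenated by
+                # tool_call.index before json.loads() is applied above.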
+ # Otherwise, ask a new question to the assistant and choose brains you would like to ask questions." + + messages.append({"role": "system", "content": PROMPT_2}) + + response_after_tools_answers = completion( + model="gpt-3.5-turbo-0125", + messages=messages, + tools=tools, + tool_choice="auto", + stream=True, + ) + + response_tokens = [] + for chunk in response_after_tools_answers: + print("chunk_response_after_tools_answers", chunk) + content = chunk.choices[0].delta.content + if content: + streamed_chat_history.assistant = content + response_tokens.append(chunk.choices[0].delta.content) + yield f"data: {json.dumps(streamed_chat_history.dict())}" + + finish_reason = chunk.choices[0].finish_reason + + if finish_reason == "stop": + chat_service.update_message_by_id( + message_id=str(streamed_chat_history.message_id), + user_message=question.question, + assistant="".join( + [ + token + for token in response_tokens + if not token.startswith("🧠<") + ] + ), + ) + break + elif finish_reason is not None: + # TODO: recursively call with tools (update prompt + create intermediary function ) + print("NO STOP") + print(chunk.choices[0]) diff --git a/backend/core/quivr_core/api/modules/brain/controller/__init__.py b/backend/core/quivr_core/api/modules/brain/controller/__init__.py new file mode 100644 index 000000000000..7e54fbb96673 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/controller/__init__.py @@ -0,0 +1 @@ +from .brain_routes import brain_router diff --git a/backend/core/quivr_core/api/modules/brain/controller/brain_routes.py b/backend/core/quivr_core/api/modules/brain/controller/brain_routes.py new file mode 100644 index 000000000000..03757e985383 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/controller/brain_routes.py @@ -0,0 +1,210 @@ +from typing import Dict +from uuid import UUID + +from fastapi import APIRouter, Depends, HTTPException, Request + +from quivr_core.api.logger import get_logger +from quivr_core.api.modules.brain.dto.inputs import ( + BrainQuestionRequest, + BrainUpdatableProperties, + CreateBrainProperties, +) +from quivr_core.api.modules.brain.entity.brain_entity import PublicBrain, RoleEnum +from quivr_core.api.modules.brain.entity.integration_brain import ( + IntegrationDescriptionEntity, +) +from quivr_core.api.modules.brain.service.brain_authorization_service import ( + has_brain_authorization, +) +from quivr_core.api.modules.brain.service.brain_service import BrainService +from quivr_core.api.modules.brain.service.brain_user_service import BrainUserService +from quivr_core.api.modules.brain.service.get_question_context_from_brain import ( + get_question_context_from_brain, +) +from quivr_core.api.modules.brain.service.integration_brain_service import ( + IntegrationBrainDescriptionService, +) +from quivr_core.api.modules.dependencies import get_current_user +from quivr_core.api.modules.prompt.service.prompt_service import PromptService +from quivr_core.api.modules.user.entity.user_identity import UserIdentity +from quivr_core.api.packages.utils.telemetry import maybe_send_telemetry + +logger = get_logger(__name__) +brain_router = APIRouter() + +prompt_service = PromptService() +brain_service = BrainService() +brain_user_service = BrainUserService() +integration_brain_description_service = IntegrationBrainDescriptionService() + + +@brain_router.get( + "/brains/integrations/", +) +async def get_integration_brain_description() -> list[IntegrationDescriptionEntity]: + """Retrieve the integration brain description.""" + return 
integration_brain_description_service.get_all_integration_descriptions() + + +@brain_router.get("/brains/", tags=["Brain"]) +async def retrieve_all_brains_for_user( + current_user: UserIdentity = Depends(get_current_user), +): + """Retrieve all brains for the current user.""" + brains = brain_user_service.get_user_brains(current_user.id) + return {"brains": brains} + + +@brain_router.get("/brains/public", tags=["Brain"]) +async def retrieve_public_brains() -> list[PublicBrain]: + """Retrieve all Quivr public brains.""" + return brain_service.get_public_brains() + + +@brain_router.get( + "/brains/{brain_id}/", + dependencies=[ + Depends( + has_brain_authorization( + required_roles=[RoleEnum.Owner, RoleEnum.Editor, RoleEnum.Viewer] + ) + ), + ], + tags=["Brain"], +) +async def retrieve_brain_by_id( + brain_id: UUID, + current_user: UserIdentity = Depends(get_current_user), +): + """Retrieve details of a specific brain by its ID.""" + brain_details = brain_service.get_brain_details(brain_id, current_user.id) + if brain_details is None: + raise HTTPException(status_code=404, detail="Brain details not found") + return brain_details + + +@brain_router.post("/brains/", tags=["Brain"]) +async def create_new_brain( + brain: CreateBrainProperties, + request: Request, + current_user: UserIdentity = Depends(get_current_user), +): + """Create a new brain for the user.""" + + maybe_send_telemetry("create_brain", {"brain_name": brain.name}, request) + new_brain = brain_service.create_brain( + brain=brain, + user_id=current_user.id, + ) + brain_user_service.create_brain_user( + user_id=current_user.id, + brain_id=new_brain.brain_id, + rights=RoleEnum.Owner, + is_default_brain=True, + ) + + return {"id": new_brain.brain_id, "name": brain.name, "rights": "Owner"} + + +@brain_router.put( + "/brains/{brain_id}/", + dependencies=[], + tags=["Brain"], +) +async def update_existing_brain( + brain_id: UUID, + brain_update_data: BrainUpdatableProperties, + current_user: UserIdentity = Depends(get_current_user), +): + """Update an existing brain's configuration.""" + existing_brain = brain_service.get_brain_details(brain_id, current_user.id) + if existing_brain is None: + raise HTTPException(status_code=404, detail="Brain not found") + + if brain_update_data.prompt_id is None and existing_brain.prompt_id: + prompt = prompt_service.get_prompt_by_id(existing_brain.prompt_id) + if prompt and prompt.status == "private": + prompt_service.delete_prompt_by_id(existing_brain.prompt_id) + + return {"message": f"Prompt {brain_id} has been updated."} + + elif brain_update_data.status == "private" and existing_brain.status == "public": + brain_user_service.delete_brain_users(brain_id) + return {"message": f"Brain {brain_id} has been deleted."} + + else: + brain_service.update_brain_by_id(brain_id, brain_update_data) + + return {"message": f"Brain {brain_id} has been updated."} + + +@brain_router.put( + "/brains/{brain_id}/secrets-values", + dependencies=[], + tags=["Brain"], +) +async def update_existing_brain_secrets( + brain_id: UUID, + secrets: Dict[str, str], + current_user: UserIdentity = Depends(get_current_user), +): + """Update an existing brain's secrets.""" + + existing_brain = brain_service.get_brain_details(brain_id, None) + + if existing_brain is None: + raise HTTPException(status_code=404, detail="Brain not found") + + if ( + existing_brain.brain_definition is None + or existing_brain.brain_definition.secrets is None + ): + raise HTTPException( + status_code=400, + detail="This brain does not support secrets.", + 
) + + is_brain_user = ( + brain_user_service.get_brain_for_user( + user_id=current_user.id, + brain_id=brain_id, + ) + is not None + ) + + if not is_brain_user: + raise HTTPException( + status_code=403, + detail="You are not authorized to update this brain.", + ) + + secrets_names = [secret.name for secret in existing_brain.brain_definition.secrets] + + for key, value in secrets.items(): + if key not in secrets_names: + raise HTTPException( + status_code=400, + detail=f"Secret {key} is not a valid secret.", + ) + if value: + brain_service.update_secret_value( + user_id=current_user.id, + brain_id=brain_id, + secret_name=key, + secret_value=value, + ) + + return {"message": f"Brain {brain_id} has been updated."} + + +@brain_router.post( + "/brains/{brain_id}/documents", + tags=["Brain"], +) +async def get_question_context_for_brain( + brain_id: UUID, question: BrainQuestionRequest +): + # TODO: Move this endpoint to AnswerGenerator service + """Retrieve the question context from a specific brain.""" + context = get_question_context_from_brain(brain_id, question.question) + return {"docs": context} diff --git a/backend/core/quivr_core/api/modules/brain/dto/__init__.py b/backend/core/quivr_core/api/modules/brain/dto/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/brain/dto/inputs.py b/backend/core/quivr_core/api/modules/brain/dto/inputs.py new file mode 100644 index 000000000000..068751ac510f --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/dto/inputs.py @@ -0,0 +1,88 @@ +from typing import Optional +from uuid import UUID + +from pydantic import BaseModel + +from quivr_core.api.logger import get_logger +from quivr_core.api.modules.brain.entity.api_brain_definition_entity import ( + ApiBrainAllowedMethods, + ApiBrainDefinitionEntity, + ApiBrainDefinitionSchema, + ApiBrainDefinitionSecret, +) +from quivr_core.api.modules.brain.entity.brain_entity import BrainType +from quivr_core.api.modules.brain.entity.integration_brain import IntegrationType + +logger = get_logger(__name__) + + +class CreateApiBrainDefinition(BaseModel, extra="ignore"): + method: ApiBrainAllowedMethods + url: str + params: Optional[ApiBrainDefinitionSchema] = ApiBrainDefinitionSchema() + search_params: ApiBrainDefinitionSchema = ApiBrainDefinitionSchema() + secrets: Optional[list[ApiBrainDefinitionSecret]] = [] + raw: Optional[bool] = False + jq_instructions: Optional[str] = None + + +class CreateIntegrationBrain(BaseModel, extra="ignore"): + integration_name: str + integration_logo_url: str + connection_settings: dict + integration_type: IntegrationType + description: str + max_files: int + + +class BrainIntegrationSettings(BaseModel, extra="ignore"): + integration_id: str + settings: dict + + +class BrainIntegrationUpdateSettings(BaseModel, extra="ignore"): + settings: dict + + +class CreateBrainProperties(BaseModel, extra="ignore"): + name: Optional[str] = "Default brain" + description: str = "This is a description" + status: Optional[str] = "private" + model: Optional[str] = None + temperature: Optional[float] = 0.0 + max_tokens: Optional[int] = 2000 + prompt_id: Optional[UUID] = None + brain_type: Optional[BrainType] = BrainType.doc + brain_definition: Optional[CreateApiBrainDefinition] = None + brain_secrets_values: Optional[dict] = {} + connected_brains_ids: Optional[list[UUID]] = [] + integration: Optional[BrainIntegrationSettings] = None + + def dict(self, *args, **kwargs): + brain_dict = super().dict(*args, **kwargs) + if 
brain_dict.get("prompt_id"): + brain_dict["prompt_id"] = str(brain_dict.get("prompt_id")) + return brain_dict + + +class BrainUpdatableProperties(BaseModel, extra="ignore"): + name: Optional[str] = None + description: Optional[str] = None + temperature: Optional[float] = None + model: Optional[str] = None + max_tokens: Optional[int] = None + status: Optional[str] = None + prompt_id: Optional[UUID] = None + brain_definition: Optional[ApiBrainDefinitionEntity] = None + connected_brains_ids: Optional[list[UUID]] = [] + integration: Optional[BrainIntegrationUpdateSettings] = None + + def dict(self, *args, **kwargs): + brain_dict = super().dict(*args, **kwargs) + if brain_dict.get("prompt_id"): + brain_dict["prompt_id"] = str(brain_dict.get("prompt_id")) + return brain_dict + + +class BrainQuestionRequest(BaseModel): + question: str diff --git a/backend/core/quivr_core/api/modules/brain/entity/__init__.py b/backend/core/quivr_core/api/modules/brain/entity/__init__.py new file mode 100644 index 000000000000..ba083445ccf1 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/entity/__init__.py @@ -0,0 +1 @@ +from .api_brain_definition_entity import ApiBrainDefinitionEntity diff --git a/backend/core/quivr_core/api/modules/brain/entity/api_brain_definition_entity.py b/backend/core/quivr_core/api/modules/brain/entity/api_brain_definition_entity.py new file mode 100644 index 000000000000..d327d44ac3ba --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/entity/api_brain_definition_entity.py @@ -0,0 +1,47 @@ +from enum import Enum +from typing import Optional +from uuid import UUID + +from pydantic import BaseModel, Extra + + +class ApiBrainDefinitionSchemaProperty(BaseModel, extra=Extra.forbid): + type: str + description: str + enum: Optional[list] = None + name: str + + def dict(self, **kwargs): + result = super().dict(**kwargs) + if "enum" in result and result["enum"] is None: + del result["enum"] + return result + + +class ApiBrainDefinitionSchema(BaseModel, extra=Extra.forbid): + properties: list[ApiBrainDefinitionSchemaProperty] = [] + required: list[str] = [] + + +class ApiBrainDefinitionSecret(BaseModel, extra=Extra.forbid): + name: str + type: str + description: Optional[str] = None + + +class ApiBrainAllowedMethods(str, Enum): + GET = "GET" + POST = "POST" + PUT = "PUT" + DELETE = "DELETE" + + +class ApiBrainDefinitionEntity(BaseModel, extra=Extra.forbid): + brain_id: UUID + method: ApiBrainAllowedMethods + url: str + params: ApiBrainDefinitionSchema + search_params: ApiBrainDefinitionSchema + secrets: list[ApiBrainDefinitionSecret] + raw: bool = False + jq_instructions: Optional[str] = None diff --git a/backend/core/quivr_core/api/modules/brain/entity/brain_entity.py b/backend/core/quivr_core/api/modules/brain/entity/brain_entity.py new file mode 100644 index 000000000000..bf963fcccae1 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/entity/brain_entity.py @@ -0,0 +1,135 @@ +from datetime import datetime +from enum import Enum +from typing import List, Optional +from uuid import UUID + +from pydantic import BaseModel + +# from sqlmodel import Enum as PGEnum +from sqlalchemy.dialects.postgresql import ENUM as PGEnum +from sqlalchemy.ext.asyncio import AsyncAttrs +from sqlmodel import TIMESTAMP, Column, Field, Relationship, SQLModel, text +from sqlmodel import UUID as PGUUID + +from quivr_core.api.modules.brain.entity.api_brain_definition_entity import ( + ApiBrainDefinitionEntity, +) +from quivr_core.api.modules.brain.entity.integration_brain import ( + 
IntegrationDescriptionEntity, + IntegrationEntity, +) +from quivr_core.api.modules.prompt.entity.prompt import Prompt + + +class BrainType(str, Enum): + doc = "doc" + api = "api" + composite = "composite" + integration = "integration" + + +class Brain(AsyncAttrs, SQLModel, table=True): + __tablename__ = "brains" # type: ignore + + brain_id: UUID | None = Field( + default=None, + sa_column=Column( + PGUUID, + server_default=text("uuid_generate_v4()"), + primary_key=True, + ), + ) + name: str + description: str + status: str | None = None + model: str | None = None + max_tokens: int | None = None + temperature: float | None = None + last_update: datetime | None = Field( + default=None, + sa_column=Column( + TIMESTAMP(timezone=False), + server_default=text("CURRENT_TIMESTAMP"), + ), + ) + brain_type: BrainType | None = Field( + sa_column=Column( + PGEnum(BrainType, name="brain_type_enum", create_type=False), + default=BrainType.integration, + ), + ) + brain_chat_history: List["ChatHistory"] = Relationship( # noqa: F821 + back_populates="brain", sa_relationship_kwargs={"lazy": "select"} + ) + prompt_id: UUID | None = Field(default=None, foreign_key="prompts.id") + prompt: Prompt | None = Relationship( # noqa: f821 + back_populates="brain", sa_relationship_kwargs={"lazy": "joined"} + ) + + # TODO : add + # "meaning" "public"."vector", + # "tags" "public"."tags"[] + + +class BrainEntity(BaseModel): + brain_id: UUID + name: str + description: Optional[str] = None + temperature: Optional[float] = None + model: Optional[str] = None + max_tokens: Optional[int] = None + status: Optional[str] = None + prompt_id: Optional[UUID] = None + last_update: datetime + brain_type: BrainType + brain_definition: Optional[ApiBrainDefinitionEntity] = None + connected_brains_ids: Optional[List[UUID]] = None + raw: Optional[bool] = None + jq_instructions: Optional[str] = None + integration: Optional[IntegrationEntity] = None + integration_description: Optional[IntegrationDescriptionEntity] = None + + @property + def id(self) -> UUID: + return self.brain_id + + def dict(self, **kwargs): + data = super().dict( + **kwargs, + ) + data["id"] = self.id + return data + + +class PublicBrain(BaseModel): + id: UUID + name: str + description: Optional[str] = None + number_of_subscribers: int = 0 + last_update: str + brain_type: BrainType + brain_definition: Optional[ApiBrainDefinitionEntity] = None + + +class RoleEnum(str, Enum): + Viewer = "Viewer" + Editor = "Editor" + Owner = "Owner" + + +class BrainUser(BaseModel): + id: UUID + user_id: UUID + rights: RoleEnum + default_brain: bool = False + + +class MinimalUserBrainEntity(BaseModel): + id: UUID + name: str + rights: RoleEnum + status: str + brain_type: BrainType + description: str + integration_logo_url: str + max_files: int diff --git a/backend/core/quivr_core/api/modules/brain/entity/composite_brain_connection_entity.py b/backend/core/quivr_core/api/modules/brain/entity/composite_brain_connection_entity.py new file mode 100644 index 000000000000..bb976112eaa5 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/entity/composite_brain_connection_entity.py @@ -0,0 +1,8 @@ +from uuid import UUID + +from pydantic import BaseModel + + +class CompositeBrainConnectionEntity(BaseModel): + composite_brain_id: UUID + connected_brain_id: UUID diff --git a/backend/core/quivr_core/api/modules/brain/entity/integration_brain.py b/backend/core/quivr_core/api/modules/brain/entity/integration_brain.py new file mode 100644 index 000000000000..61d46fd204b2 --- /dev/null +++ 
b/backend/core/quivr_core/api/modules/brain/entity/integration_brain.py @@ -0,0 +1,46 @@ +from enum import Enum +from typing import Optional +from uuid import UUID + +from pydantic import BaseModel + + +class IntegrationType(str, Enum): + CUSTOM = "custom" + SYNC = "sync" + DOC = "doc" + + +class IntegrationBrainTag(str, Enum): + NEW = "new" + RECOMMENDED = "recommended" + MOST_POPULAR = "most_popular" + PREMIUM = "premium" + COMING_SOON = "coming_soon" + COMMUNITY = "community" + DEPRECATED = "deprecated" + + +class IntegrationDescriptionEntity(BaseModel): + id: UUID + integration_name: str + integration_logo_url: Optional[str] = None + connection_settings: Optional[dict] = None + integration_type: IntegrationType + tags: Optional[list[IntegrationBrainTag]] = [] + information: Optional[str] = None + description: str + max_files: int + allow_model_change: bool + integration_display_name: str + onboarding_brain: bool + + +class IntegrationEntity(BaseModel): + id: int + user_id: str + brain_id: str + integration_id: str + settings: Optional[dict] = None + credentials: Optional[dict] = None + last_synced: str diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Big/Brain.py b/backend/core/quivr_core/api/modules/brain/integrations/Big/Brain.py new file mode 100644 index 000000000000..ed09ceac6bd0 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/integrations/Big/Brain.py @@ -0,0 +1,146 @@ +import json +from typing import AsyncIterable +from uuid import UUID + +from langchain.chains import ConversationalRetrievalChain, LLMChain +from langchain.chains.question_answering import load_qa_chain +from langchain_community.chat_models import ChatLiteLLM +from langchain_core.prompts.chat import ( + ChatPromptTemplate, + HumanMessagePromptTemplate, + SystemMessagePromptTemplate, +) +from langchain_core.prompts.prompt import PromptTemplate + +from quivr_core.api.logger import get_logger +from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA +from quivr_core.api.modules.chat.dto.chats import ChatQuestion + +logger = get_logger(__name__) + + +class BigBrain(KnowledgeBrainQA): + """ + The BigBrain class integrates advanced conversational retrieval and language model chains + to provide comprehensive and context-aware responses to user queries. + + It leverages a combination of document retrieval, question condensation, and document-based + question answering to generate responses that are informed by a wide range of knowledge sources. + """ + + def __init__( + self, + **kwargs, + ): + """ + Initializes the BigBrain class with specific configurations. + + Args: + **kwargs: Arbitrary keyword arguments. + """ + super().__init__( + **kwargs, + ) + + def get_chain(self): + """ + Constructs and returns the conversational QA chain used by BigBrain. + + Returns: + A ConversationalRetrievalChain instance. + """ + system_template = """Combine these summaries in a way that makes sense and answer the user's question. + Use markdown or any other techniques to display the content in a nice and aerated way. Answer in the language of the question. 
+ Here are user instructions on how to respond: {custom_personality} + ______________________ + {summaries}""" + messages = [ + SystemMessagePromptTemplate.from_template(system_template), + HumanMessagePromptTemplate.from_template("{question}"), + ] + CHAT_COMBINE_PROMPT = ChatPromptTemplate.from_messages(messages) + + ### Question prompt + question_prompt_template = """Use the following portion of a long document to see if any of the text is relevant to answer the question. + Return any relevant text verbatim. Return the answer in the same language as the question. If the answer is not in the text, just say nothing in the same language as the question. + {context} + Question: {question} + Relevant text, if any, else say Nothing:""" + QUESTION_PROMPT = PromptTemplate( + template=question_prompt_template, input_variables=["context", "question"] + ) + + ### Condense Question Prompt + + _template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question in exactly the same language as the original question. + + Chat History: + {chat_history} + Follow Up Input: {question} + Standalone question in same language as question:""" + CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template) + + api_base = None + if self.brain_settings.ollama_api_base_url and self.model.startswith("ollama"): + api_base = self.brain_settings.ollama_api_base_url + + llm = ChatLiteLLM( + temperature=0, + model=self.model, + api_base=api_base, + max_tokens=self.max_tokens, + ) + + retriever_doc = self.knowledge_qa.get_retriever() + + question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT) + doc_chain = load_qa_chain( + llm, + chain_type="map_reduce", + question_prompt=QUESTION_PROMPT, + combine_prompt=CHAT_COMBINE_PROMPT, + ) + + chain = ConversationalRetrievalChain( + retriever=retriever_doc, + question_generator=question_generator, + combine_docs_chain=doc_chain, + ) + + return chain + + async def generate_stream( + self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True + ) -> AsyncIterable: + """ + Generates a stream of responses for a given question in real-time. + + Args: + chat_id (UUID): The unique identifier for the chat session. + question (ChatQuestion): The question object containing the user's query. + save_answer (bool): Flag indicating whether to save the answer to the chat history. + + Returns: + An asynchronous iterable of response strings. 
+ """ + conversational_qa_chain = self.get_chain() + transformed_history, streamed_chat_history = ( + self.initialize_streamed_chat_history(chat_id, question) + ) + response_tokens = [] + + async for chunk in conversational_qa_chain.astream( + { + "question": question.question, + "chat_history": transformed_history, + "custom_personality": ( + self.prompt_to_use.content if self.prompt_to_use else None + ), + } + ): + if "answer" in chunk: + response_tokens.append(chunk["answer"]) + streamed_chat_history.assistant = chunk["answer"] + yield f"data: {json.dumps(streamed_chat_history.dict())}" + + self.save_answer(question, response_tokens, streamed_chat_history, save_answer) diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Big/__init__.py b/backend/core/quivr_core/api/modules/brain/integrations/Big/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Claude/Brain.py b/backend/core/quivr_core/api/modules/brain/integrations/Claude/Brain.py new file mode 100644 index 000000000000..c667e4ae4615 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/integrations/Claude/Brain.py @@ -0,0 +1,101 @@ +import json +from typing import AsyncIterable +from uuid import UUID + +from langchain_community.chat_models import ChatLiteLLM +from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder + +from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA +from quivr_core.api.modules.chat.dto.chats import ChatQuestion + + +class ClaudeBrain(KnowledgeBrainQA): + """ + ClaudeBrain integrates with Claude model to provide conversational AI capabilities. + It leverages the Claude model for generating responses based on the provided context. + + Attributes: + **kwargs: Arbitrary keyword arguments for KnowledgeBrainQA initialization. + """ + + def __init__( + self, + **kwargs, + ): + """ + Initializes the ClaudeBrain with the given arguments. + + Args: + **kwargs: Arbitrary keyword arguments. + """ + super().__init__( + **kwargs, + ) + + def calculate_pricing(self): + """ + Calculates the pricing for using the ClaudeBrain. + + Returns: + int: The pricing value. + """ + return 3 + + def get_chain(self): + """ + Constructs and returns the conversational chain for ClaudeBrain. + + Returns: + A conversational chain object. + """ + prompt = ChatPromptTemplate.from_messages( + [ + ( + "system", + "You are Claude powered by Quivr. You are an assistant. {custom_personality}", + ), + MessagesPlaceholder(variable_name="chat_history"), + ("human", "{question}"), + ] + ) + + chain = prompt | ChatLiteLLM( + model="claude-3-haiku-20240307", max_tokens=self.max_tokens + ) + + return chain + + async def generate_stream( + self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True + ) -> AsyncIterable: + """ + Generates a stream of responses for the given question. + + Args: + chat_id (UUID): The chat session ID. + question (ChatQuestion): The question object. + save_answer (bool): Whether to save the answer. + + Yields: + AsyncIterable: A stream of response strings. 
+ """ + conversational_qa_chain = self.get_chain() + transformed_history, streamed_chat_history = ( + self.initialize_streamed_chat_history(chat_id, question) + ) + response_tokens = [] + + async for chunk in conversational_qa_chain.astream( + { + "question": question.question, + "chat_history": transformed_history, + "custom_personality": ( + self.prompt_to_use.content if self.prompt_to_use else None + ), + } + ): + response_tokens.append(chunk.content) + streamed_chat_history.assistant = chunk.content + yield f"data: {json.dumps(streamed_chat_history.dict())}" + + self.save_answer(question, response_tokens, streamed_chat_history, save_answer) diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Claude/__init__.py b/backend/core/quivr_core/api/modules/brain/integrations/Claude/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/brain/integrations/GPT4/Brain.py b/backend/core/quivr_core/api/modules/brain/integrations/GPT4/Brain.py new file mode 100644 index 000000000000..100109544a6c --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/integrations/GPT4/Brain.py @@ -0,0 +1,283 @@ +import json +import operator +from typing import Annotated, AsyncIterable, List, Optional, Sequence, TypedDict +from uuid import UUID + +from langchain.tools import BaseTool +from langchain_core.messages import BaseMessage, ToolMessage +from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder +from langchain_core.tools import BaseTool +from langchain_openai import ChatOpenAI +from langgraph.graph import END, StateGraph +from langgraph.prebuilt import ToolExecutor, ToolInvocation + +from quivr_core.api.logger import get_logger +from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA +from quivr_core.api.modules.chat.dto.chats import ChatQuestion +from quivr_core.api.modules.chat.dto.outputs import GetChatHistoryOutput +from quivr_core.api.modules.chat.service.chat_service import ChatService +from quivr_core.api.modules.dependencies import get_service +from quivr_core.api.modules.tools import ( + EmailSenderTool, + ImageGeneratorTool, + URLReaderTool, + WebSearchTool, +) + + +class AgentState(TypedDict): + messages: Annotated[Sequence[BaseMessage], operator.add] + + +logger = get_logger(__name__) + +chat_service = get_service(ChatService)() + + +class GPT4Brain(KnowledgeBrainQA): + """ + GPT4Brain integrates with GPT-4 to provide real-time answers and supports various tools to enhance its capabilities. + + Available Tools: + - WebSearchTool: Performs web searches to find relevant information. + - ImageGeneratorTool: Generates images based on textual descriptions. + - URLReaderTool: Reads and summarizes content from URLs. + - EmailSenderTool: Sends emails with specified content. + + Use Cases: + - WebSearchTool can be used to find the latest news articles on a specific topic or to gather information from various websites. + - ImageGeneratorTool is useful for creating visual content based on textual prompts, such as generating a company logo based on a description. + - URLReaderTool can be used to summarize articles or web pages, making it easier to quickly understand the content without reading the entire text. + - EmailSenderTool enables automated email sending, such as sending a summary of a meeting's minutes to all participants. 
+ """ + + tools: Optional[List[BaseTool]] = None + tool_executor: Optional[ToolExecutor] = None + function_model: ChatOpenAI = None + + def __init__( + self, + **kwargs, + ): + super().__init__( + **kwargs, + ) + self.tools = [ + WebSearchTool(), + ImageGeneratorTool(), + URLReaderTool(), + EmailSenderTool(user_email=self.user_email), + ] + self.tool_executor = ToolExecutor(tools=self.tools) + + def calculate_pricing(self): + return 3 + + def should_continue(self, state): + messages = state["messages"] + last_message = messages[-1] + # Make sure there is a previous message + + if last_message.tool_calls: + name = last_message.tool_calls[0]["name"] + if name == "image-generator": + return "final" + # If there is no function call, then we finish + if not last_message.tool_calls: + return "end" + # Otherwise if there is, we check if it's suppose to return direct + else: + return "continue" + + # Define the function that calls the model + def call_model(self, state): + messages = state["messages"] + response = self.function_model.invoke(messages) + # We return a list, because this will get added to the existing list + return {"messages": [response]} + + # Define the function to execute tools + def call_tool(self, state): + messages = state["messages"] + # Based on the continue condition + # we know the last message involves a function call + last_message = messages[-1] + # We construct an ToolInvocation from the function_call + tool_call = last_message.tool_calls[0] + tool_name = tool_call["name"] + arguments = tool_call["args"] + + action = ToolInvocation( + tool=tool_call["name"], + tool_input=tool_call["args"], + ) + # We call the tool_executor and get back a response + response = self.tool_executor.invoke(action) + # We use the response to create a FunctionMessage + function_message = ToolMessage( + content=str(response), name=action.tool, tool_call_id=tool_call["id"] + ) + # We return a list, because this will get added to the existing list + return {"messages": [function_message]} + + def create_graph(self): + # Define a new graph + workflow = StateGraph(AgentState) + + # Define the two nodes we will cycle between + workflow.add_node("agent", self.call_model) + workflow.add_node("action", self.call_tool) + workflow.add_node("final", self.call_tool) + + # Set the entrypoint as `agent` + # This means that this node is the first one called + workflow.set_entry_point("agent") + + # We now add a conditional edge + workflow.add_conditional_edges( + # First, we define the start node. We use `agent`. + # This means these are the edges taken after the `agent` node is called. + "agent", + # Next, we pass in the function that will determine which node is called next. + self.should_continue, + # Finally we pass in a mapping. + # The keys are strings, and the values are other nodes. + # END is a special node marking that the graph should finish. + # What will happen is we will call `should_continue`, and then the output of that + # will be matched against the keys in this mapping. + # Based on which one it matches, that node will then be called. + { + # If `tools`, then we call the tool node. + "continue": "action", + # Final call + "final": "final", + # Otherwise we finish. + "end": END, + }, + ) + + # We now add a normal edge from `tools` to `agent`. + # This means that after `tools` is called, `agent` node is called next. + workflow.add_edge("action", "agent") + workflow.add_edge("final", END) + + # Finally, we compile it! 
+ # This compiles it into a LangChain Runnable, + # meaning you can use it as you would any other runnable + app = workflow.compile() + return app + + def get_chain(self): + self.function_model = ChatOpenAI(model="gpt-4o", temperature=0, streaming=True) + + self.function_model = self.function_model.bind_tools(self.tools) + + graph = self.create_graph() + + return graph + + async def generate_stream( + self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True + ) -> AsyncIterable: + conversational_qa_chain = self.get_chain() + transformed_history, streamed_chat_history = ( + self.initialize_streamed_chat_history(chat_id, question) + ) + filtered_history = self.filter_history(transformed_history, 40, 2000) + response_tokens = [] + config = {"metadata": {"conversation_id": str(chat_id)}} + + prompt = ChatPromptTemplate.from_messages( + [ + ( + "system", + "You are GPT-4 powered by Quivr. You are an assistant. {custom_personality}", + ), + MessagesPlaceholder(variable_name="chat_history"), + ("human", "{question}"), + ] + ) + prompt_formated = prompt.format_messages( + chat_history=filtered_history, + question=question.question, + custom_personality=( + self.prompt_to_use.content if self.prompt_to_use else None + ), + ) + + async for event in conversational_qa_chain.astream_events( + {"messages": prompt_formated}, + config=config, + version="v1", + ): + kind = event["event"] + if kind == "on_chat_model_stream": + content = event["data"]["chunk"].content + if content: + # Empty content in the context of OpenAI or Anthropic usually means + # that the model is asking for a tool to be invoked. + # So we only print non-empty content + response_tokens.append(content) + streamed_chat_history.assistant = content + yield f"data: {json.dumps(streamed_chat_history.dict())}" + elif kind == "on_tool_start": + print("--") + print( + f"Starting tool: {event['name']} with inputs: {event['data'].get('input')}" + ) + elif kind == "on_tool_end": + print(f"Done tool: {event['name']}") + print(f"Tool output was: {event['data'].get('output')}") + print("--") + elif kind == "on_chain_end": + output = event["data"]["output"] + final_output = [item for item in output if "final" in item] + if final_output: + if ( + final_output[0]["final"]["messages"][0].name + == "image-generator" + ): + final_message = final_output[0]["final"]["messages"][0].content + response_tokens.append(final_message) + streamed_chat_history.assistant = final_message + yield f"data: {json.dumps(streamed_chat_history.dict())}" + + self.save_answer(question, response_tokens, streamed_chat_history, save_answer) + + def generate_answer( + self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True + ) -> GetChatHistoryOutput: + conversational_qa_chain = self.get_chain() + transformed_history, _ = self.initialize_streamed_chat_history( + chat_id, question + ) + filtered_history = self.filter_history(transformed_history, 40, 2000) + config = {"metadata": {"conversation_id": str(chat_id)}} + + prompt = ChatPromptTemplate.from_messages( + [ + ( + "system", + "You are GPT-4 powered by Quivr. You are an assistant. 
{custom_personality}",
+                ),
+                MessagesPlaceholder(variable_name="chat_history"),
+                ("human", "{question}"),
+            ]
+        )
+        prompt_formated = prompt.format_messages(
+            chat_history=filtered_history,
+            question=question.question,
+            custom_personality=(
+                self.prompt_to_use.content if self.prompt_to_use else None
+            ),
+        )
+        model_response = conversational_qa_chain.invoke(
+            {"messages": prompt_formated},
+            config=config,
+        )
+
+        answer = model_response["messages"][-1].content
+
+        return self.save_non_streaming_answer(
+            chat_id=chat_id, question=question, answer=answer, metadata={}
+        )
diff --git a/backend/core/quivr_core/api/modules/brain/integrations/GPT4/__init__.py b/backend/core/quivr_core/api/modules/brain/integrations/GPT4/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
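The agent loop in GPT4Brain above follows the standard LangGraph pattern: the model is bound to the tool schemas, and routing inspects `tool_calls` on the last message. A minimal sketch of just that routing decision with a stubbed message (the "web-search" tool name is invented for illustration; the real tools live in quivr_core.api.modules.tools):

    from langchain_core.messages import AIMessage

    def route(last_message: AIMessage) -> str:
        # Mirrors GPT4Brain.should_continue: image generation short-circuits
        # to the "final" node, other tool calls loop through "action", and a
        # plain answer ends the graph.
        if last_message.tool_calls:
            if last_message.tool_calls[0]["name"] == "image-generator":
                return "final"
            return "continue"
        return "end"

    msg = AIMessage(
        content="",
        tool_calls=[{"name": "web-search", "args": {"query": "Quivr"}, "id": "call_1"}],
    )
    print(route(msg))  # -> "continue"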
diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Multi_Contract/Brain.py b/backend/core/quivr_core/api/modules/brain/integrations/Multi_Contract/Brain.py
new file mode 100644
index 000000000000..8b5aafdd89ca
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/brain/integrations/Multi_Contract/Brain.py
@@ -0,0 +1,205 @@
+import datetime
+from operator import itemgetter
+from typing import List
+
+from langchain.prompts import HumanMessagePromptTemplate, SystemMessagePromptTemplate
+from langchain_community.chat_models import ChatLiteLLM
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
+from langchain_core.pydantic_v1 import BaseModel as BaseModelV1
+from langchain_core.pydantic_v1 import Field as FieldV1
+from langchain_core.runnables import RunnableLambda, RunnablePassthrough
+from langchain_openai import ChatOpenAI
+
+from quivr_core.api.logger import get_logger
+from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA
+
+logger = get_logger(__name__)
+
+
+class cited_answer(BaseModelV1):
+    """Answer the user question based only on the given sources, and cite the sources used."""
+
+    thoughts: str = FieldV1(
+        ...,
+        description="""Description of the thought process, based only on the given sources.
+        Cite the text as much as possible and give the document name it appears in. In the format : 'Doc_name states : cited_text'. Be the most
+        procedural as possible.""",
+    )
+    answer: str = FieldV1(
+        ...,
+        description="The answer to the user question, which is based only on the given sources.",
+    )
+    citations: List[int] = FieldV1(
+        ...,
+        description="The integer IDs of the SPECIFIC sources which justify the answer.",
+    )
+    followup_questions: List[str] = FieldV1(
+        ...,
+        description="Generate up to 3 follow-up questions that could be asked based on the answer given or context provided.",
+    )
+
+
+# First step is to create the Rephrasing Prompt
+_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language. Keep as much details as possible from previous messages. Keep entity names and all.
+
+Chat History:
+{chat_history}
+Follow Up Input: {question}
+Standalone question:"""
+CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)

+# Next is the answering prompt
+
+template_answer = """
+Context:
+{context}
+
+User Question: {question}
+Answer:
+"""
+
+today_date = datetime.datetime.now().strftime("%B %d, %Y")
+
+system_message_template = (
+    f"Your name is Quivr. You're a helpful assistant. Today's date is {today_date}."
+)
+
+system_message_template += """
+When answering, use neat markdown formatting.
+Answer in a concise and clear manner.
+Use the following pieces of context from files provided by the user to answer the user's question.
+Answer in the same language as the user question.
+If you don't know the answer with the context provided from the files, just say that you don't know, don't try to make up an answer.
+Don't cite the source id in the answer objects, but you can use the source to answer the question.
+You have access to the files to answer the user question (limited to first 20 files):
+{files}
+
+If not None, User instruction to follow to answer: {custom_instructions}
+Don't cite the source id in the answer objects, but you can use the source to answer the question.
+"""
+
+
+ANSWER_PROMPT = ChatPromptTemplate.from_messages(
+    [
+        SystemMessagePromptTemplate.from_template(system_message_template),
+        HumanMessagePromptTemplate.from_template(template_answer),
+    ]
+)
+
+
+# How we format documents
+
+DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(
+    template="Source: {index} \n {page_content}"
+)
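The two prompts above implement the classic condense-then-answer RAG split: the follow-up is first rewritten into a standalone question, and that rewrite is what actually hits the retriever. A minimal sketch of the condense sub-chain in isolation (model name and sample history are illustrative, and an API key is assumed):

    from langchain_community.chat_models import ChatLiteLLM
    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.prompts import PromptTemplate

    condense_prompt = PromptTemplate.from_template(
        "Given the following conversation and a follow up question, rephrase the "
        "follow up question to be a standalone question.\n\n"
        "Chat History:\n{chat_history}\n"
        "Follow Up Input: {question}\n"
        "Standalone question:"
    )
    condense = condense_prompt | ChatLiteLLM(model="gpt-4o", temperature=0) | StrOutputParser()

    print(condense.invoke({
        "chat_history": "Human: Who signed the supply contract?\nAI: Acme Corp.",
        "question": "When did they sign it?",
    }))  # e.g. "When did Acme Corp sign the supply contract?"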
+ """ + super().__init__( + **kwargs, + ) + + def get_chain(self): + list_files_array = ( + self.knowledge_qa.knowledge_service.get_all_knowledge_in_brain( + self.brain_id + ) + ) # pyright: ignore reportPrivateUsage=none + + list_files_array = [file.file_name for file in list_files_array] + # Max first 10 files + if len(list_files_array) > 20: + list_files_array = list_files_array[:20] + + list_files = "\n".join(list_files_array) if list_files_array else "None" + + retriever_doc = self.knowledge_qa.get_retriever() + + loaded_memory = RunnablePassthrough.assign( + chat_history=RunnableLambda( + lambda x: self.filter_history(x["chat_history"]), + ), + question=lambda x: x["question"], + ) + + api_base = None + if self.brain_settings.ollama_api_base_url and self.model.startswith("ollama"): + api_base = self.brain_settings.ollama_api_base_url + + standalone_question = { + "standalone_question": { + "question": lambda x: x["question"], + "chat_history": itemgetter("chat_history"), + } + | CONDENSE_QUESTION_PROMPT + | ChatLiteLLM(temperature=0, model=self.model, api_base=api_base) + | StrOutputParser(), + } + + knowledge_qa = self.knowledge_qa + prompt_custom_user = knowledge_qa.prompt_to_use() + prompt_to_use = "None" + if prompt_custom_user: + prompt_to_use = prompt_custom_user.content + + # Now we retrieve the documents + retrieved_documents = { + "docs": itemgetter("standalone_question") | retriever_doc, + "question": lambda x: x["standalone_question"], + "custom_instructions": lambda x: prompt_to_use, + } + + final_inputs = { + "context": lambda x: self.knowledge_qa._combine_documents(x["docs"]), + "question": itemgetter("question"), + "custom_instructions": itemgetter("custom_instructions"), + "files": lambda x: list_files, + } + llm = ChatLiteLLM( + max_tokens=self.max_tokens, + model=self.model, + temperature=self.temperature, + api_base=api_base, + ) # pyright: ignore reportPrivateUsage=none + if self.model_compatible_with_function_calling(self.model): + # And finally, we do the part that returns the answers + llm_function = ChatOpenAI( + max_tokens=self.max_tokens, + model=self.model, + temperature=self.temperature, + ) + llm = llm_function.bind_tools( + [cited_answer], + tool_choice="cited_answer", + ) + + answer = { + "answer": final_inputs | ANSWER_PROMPT | llm, + "docs": itemgetter("docs"), + } + + return loaded_memory | standalone_question | retrieved_documents | answer diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Multi_Contract/__init__.py b/backend/core/quivr_core/api/modules/brain/integrations/Multi_Contract/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Notion/Brain.py b/backend/core/quivr_core/api/modules/brain/integrations/Notion/Brain.py new file mode 100644 index 000000000000..3e7f61c4bd02 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/integrations/Notion/Brain.py @@ -0,0 +1,25 @@ +from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA + + +class NotionBrain(KnowledgeBrainQA): + """ + NotionBrain integrates with Notion to provide knowledge-based responses. + It leverages data stored in Notion to answer user queries. + + Attributes: + **kwargs: Arbitrary keyword arguments for KnowledgeBrainQA initialization. + """ + + def __init__( + self, + **kwargs, + ): + """ + Initializes the NotionBrain with the given arguments. + + Args: + **kwargs: Arbitrary keyword arguments. 
+ """ + super().__init__( + **kwargs, + ) diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Notion/Notion_connector.py b/backend/core/quivr_core/api/modules/brain/integrations/Notion/Notion_connector.py new file mode 100644 index 000000000000..2fca462c2c98 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/integrations/Notion/Notion_connector.py @@ -0,0 +1,393 @@ +import os +import tempfile +import time +from io import BytesIO +from typing import Any, List, Optional + +import requests +from fastapi import UploadFile +from pydantic import BaseModel + +from quivr_core.api.celery_config import celery +from quivr_core.api.logger import get_logger +from quivr_core.api.modules.brain.entity.integration_brain import IntegrationEntity +from quivr_core.api.modules.brain.repository.integration_brains import ( + Integration, + IntegrationBrain, +) +from quivr_core.api.modules.knowledge.dto.inputs import CreateKnowledgeProperties +from quivr_core.api.modules.knowledge.repository.knowledge_interface import ( + KnowledgeInterface, +) +from quivr_core.api.modules.knowledge.service.knowledge_service import KnowledgeService +from quivr_core.api.modules.upload.service.upload_file import upload_file_storage + +logger = get_logger(__name__) + + +class NotionPage(BaseModel): + """Represents a Notion Page object to be used in the NotionConnector class""" + + id: str + created_time: str + last_edited_time: str + archived: bool + properties: dict[str, Any] + url: str + + +class NotionSearchResponse(BaseModel): + """Represents the response from the Notion Search API""" + + results: list[dict[str, Any]] + next_cursor: Optional[str] = None + has_more: bool = False + + +class NotionConnector(IntegrationBrain, Integration): + """A class to interact with the Notion API""" + + credentials: dict[str, str] = None + integration_details: IntegrationEntity = None + brain_id: str = None + user_id: str = None + knowledge_service: KnowledgeInterface + recursive_index_enabled: bool = False + max_pages: int = 100 + + def __init__(self, brain_id: str, user_id: str): + super().__init__() + self.brain_id = brain_id + self.user_id = user_id + self._load_credentials() + self.knowledge_service = KnowledgeService() + + def _load_credentials(self) -> dict[str, str]: + """Load the Notion credentials""" + self.integration_details = self.get_integration_brain(self.brain_id) + if self.credentials is None: + logger.info("Loading Notion credentials") + self.integration_details.credentials = { + "notion_integration_token": self.integration_details.settings.get( + "notion_integration_token", "" + ) + } + self.update_integration_brain( + self.brain_id, self.user_id, self.integration_details + ) + self.credentials = self.integration_details.credentials + else: # pragma: no cover + self.credentials = self.integration_details.credentials + + def _headers(self) -> dict[str, str]: + """Get the headers for the Notion API""" + return { + "Authorization": f'Bearer {self.credentials["notion_integration_token"]}', + "Content-Type": "application/json", + "Notion-Version": "2022-06-28", + } + + def _search_notion(self, query_dict: dict[str, Any]) -> NotionSearchResponse: + """ + Search for pages from a Notion database. 
+ """ + # Use self.credentials to authenticate the request + headers = self._headers() + res = requests.post( + "https://api.notion.com/v1/search", + headers=headers, + json=query_dict, + # Adjust the timeout as needed + timeout=10, + ) + res.raise_for_status() + return NotionSearchResponse(**res.json()) + + def _fetch_blocks(self, page_id: str, cursor: str | None = None) -> dict[str, Any]: + """ + Fetch the blocks of a Notion page. + """ + logger.info(f"Fetching blocks for page: {page_id}") + headers = self._headers() + query_params = None if not cursor else {"start_cursor": cursor} + res = requests.get( + f"https://api.notion.com/v1/blocks/{page_id}/children", + params=query_params, + headers=headers, + timeout=10, + ) + res.raise_for_status() + return res.json() + + def _fetch_page(self, page_id: str) -> dict[str, Any]: + """ + Fetch a Notion page. + """ + logger.info(f"Fetching page: {page_id}") + headers = self._headers() + block_url = f"https://api.notion.com/v1/pages/{page_id}" + res = requests.get( + block_url, + headers=headers, + timeout=10, + ) + try: + res.raise_for_status() + except Exception: + logger.exception(f"Error fetching page - {res.json()}") + return None + return NotionPage(**res.json()) + + def _read_blocks( + self, page_block_id: str + ) -> tuple[list[tuple[str, str]], list[str]]: + """Reads blocks for a page""" + result_lines: list[tuple[str, str]] = [] + child_pages: list[str] = [] + cursor = None + while True: + data = self._fetch_blocks(page_block_id, cursor) + + for result in data["results"]: + result_block_id = result["id"] + result_type = result["type"] + result_obj = result[result_type] + + cur_result_text_arr = [] + if "rich_text" in result_obj: + for rich_text in result_obj["rich_text"]: + # skip if doesn't have text object + if "text" in rich_text: + text = rich_text["text"]["content"] + cur_result_text_arr.append(text) + + if result["has_children"]: + if result_type == "child_page": + child_pages.append(result_block_id) + else: + logger.info(f"Entering sub-block: {result_block_id}") + subblock_result_lines, subblock_child_pages = self._read_blocks( + result_block_id + ) + logger.info(f"Finished sub-block: {result_block_id}") + result_lines.extend(subblock_result_lines) + child_pages.extend(subblock_child_pages) + + # if result_type == "child_database" and self.recursive_index_enabled: + # child_pages.extend(self._read_pages_from_database(result_block_id)) + + cur_result_text = "\n".join(cur_result_text_arr) + if cur_result_text: + result_lines.append((cur_result_text, result_block_id)) + + if data["next_cursor"] is None: + break + + cursor = data["next_cursor"] + + return result_lines, child_pages + + def _read_page_title(self, page: NotionPage) -> str: + """Extracts the title from a Notion page""" + page_title = None + for _, prop in page.properties.items(): + if prop["type"] == "title" and len(prop["title"]) > 0: + page_title = " ".join([t["plain_text"] for t in prop["title"]]).strip() + break + if page_title is None: + page_title = f"Untitled Page [{page.id}]" + page_title = "".join(e for e in page_title if e.isalnum()) + return page_title + + def _read_page_url(self, page: NotionPage) -> str: + """Extracts the URL from a Notion page""" + return page.url + + def _read_pages_from_database(self, database_id: str) -> list[str]: + """Reads pages from a Notion database""" + headers = self._headers() + res = requests.post( + f"https://api.notion.com/v1/databases/{database_id}/query", + headers=headers, + timeout=10, + ) + res.raise_for_status() + return 
[page["id"] for page in res.json()["results"]] + + def _read_page(self, page_id: str) -> tuple[str, list[str]]: + """Reads a Notion page""" + page = self._fetch_page(page_id) + if page is None: + return None, None, None, None + page_title = self._read_page_title(page) + page_content, child_pages = self._read_blocks(page_id) + page_url = self._read_page_url(page) + return page_title, page_content, child_pages, page_url + + def _filter_pages_by_time( + self, + pages: list[dict[str, Any]], + start: str, + filter_field: str = "last_edited_time", + ) -> list[NotionPage]: + filtered_pages: list[NotionPage] = [] + start_time = time.mktime( + time.strptime(start, "%Y-%m-%dT%H:%M:%S.%f%z") + ) # Convert `start` to a float + for page in pages: + compare_time = time.mktime( + time.strptime(page[filter_field], "%Y-%m-%dT%H:%M:%S.%f%z") + ) + if compare_time > start_time: # Compare `compare_time` with `start_time` + filtered_pages += [NotionPage(**page)] + return filtered_pages + + def get_all_pages(self) -> list[NotionPage]: + """ + Get all the pages from Notion. + """ + query_dict = { + "filter": {"property": "object", "value": "page"}, + "page_size": 100, + } + max_pages = self.max_pages + pages_count = 0 + while True: + search_response = self._search_notion(query_dict) + for page in search_response.results: + pages_count += 1 + if pages_count > max_pages: + break + yield NotionPage(**page) + + if search_response.has_more: + query_dict["start_cursor"] = search_response.next_cursor + else: + break + + def add_file_to_knowledge( + self, page_content: List[tuple[str, str]], page_name: str, page_url: str + ): + """ + Add a file to the knowledge base + """ + logger.info(f"Adding file to knowledge: {page_name}") + filename_with_brain_id = ( + str(self.brain_id) + "/" + str(page_name) + "_notion.txt" + ) + try: + concatened_page_content = "" + if page_content: + for content in page_content: + concatened_page_content += content[0] + "\n" + + # Create a BytesIO object from the content + content_io = BytesIO(concatened_page_content.encode("utf-8")) + + # Create a file of type UploadFile + file = UploadFile(filename=filename_with_brain_id, file=content_io) + + # Write the UploadFile content to a temporary file + with tempfile.NamedTemporaryFile(delete=False) as temp_file: + temp_file.write(file.file.read()) + temp_file_path = temp_file.name + + # Upload the temporary file to the knowledge base + response = upload_file_storage( + temp_file_path, filename_with_brain_id, "true" + ) + logger.info(f"File {response} uploaded successfully") + + # Delete the temporary file + os.remove(temp_file_path) + + knowledge_to_add = CreateKnowledgeProperties( + brain_id=self.brain_id, + file_name=page_name + "_notion.txt", + extension="txt", + integration="notion", + integration_link=page_url, + ) + + added_knowledge = self.knowledge_service.add_knowledge(knowledge_to_add) + logger.info(f"Knowledge {added_knowledge} added successfully") + + celery.send_task( + "process_file_and_notify", + kwargs={ + "file_name": filename_with_brain_id, + "file_original_name": page_name + "_notion.txt", + "brain_id": self.brain_id, + "delete_file": True, + }, + ) + except Exception: + logger.error("Error adding knowledge") + + def load(self): + """ + Get all the pages, blocks, databases from Notion into a single document per page + """ + all_pages = list(self.get_all_pages()) # Convert generator to list + documents = [] + for page in all_pages: + logger.info(f"Reading page: {page.id}") + page_title, page_content, child_pages, page_url = 
self._read_page(page.id) + document = { + "page_title": page_title, + "page_content": page_content, + "child_pages": child_pages, + "page_url": page_url, + } + documents.append(document) + self.add_file_to_knowledge(page_content, page_title, page_url) + return documents + + def poll(self): + """ + Update all the brains with the latest data from Notion + """ + integration = self.get_integration_brain(self.brain_id) + last_synced = integration.last_synced + + query_dict = { + "page_size": self.max_pages, + "sort": {"timestamp": "last_edited_time", "direction": "descending"}, + "filter": {"property": "object", "value": "page"}, + } + documents = [] + + while True: + db_res = self._search_notion(query_dict) + pages = self._filter_pages_by_time( + db_res.results, last_synced, filter_field="last_edited_time" + ) + for page in pages: + logger.info(f"Reading page: {page.id}") + page_title, page_content, child_pages, page_url = self._read_page( + page.id + ) + document = { + "page_title": page_title, + "page_content": page_content, + "child_pages": child_pages, + "page_url": page_url, + } + documents.append(document) + self.add_file_to_knowledge(page_content, page_title, page_url) + if not db_res.has_more: + break + query_dict["start_cursor"] = db_res.next_cursor + logger.info( + f"last Synced: {self.update_last_synced(self.brain_id, self.user_id)}" + ) + return documents + + +if __name__ == "__main__": + notion = NotionConnector( + brain_id="73f7d092-d596-4fd0-b24f-24031e9b53cd", + user_id="39418e3b-0258-4452-af60-7acfcc1263ff", + ) + + print(notion.poll()) diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Notion/__init__.py b/backend/core/quivr_core/api/modules/brain/integrations/Notion/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Proxy/Brain.py b/backend/core/quivr_core/api/modules/brain/integrations/Proxy/Brain.py new file mode 100644 index 000000000000..a121b7596683 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/integrations/Proxy/Brain.py @@ -0,0 +1,135 @@ +import json +from typing import AsyncIterable +from uuid import UUID + +from langchain_community.chat_models import ChatLiteLLM +from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder + +from quivr_core.api.logger import get_logger +from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA +from quivr_core.api.modules.chat.dto.chats import ChatQuestion +from quivr_core.api.modules.chat.dto.outputs import GetChatHistoryOutput +from quivr_core.api.modules.chat.service.chat_service import ChatService +from quivr_core.api.modules.dependencies import get_service + +logger = get_logger(__name__) + +chat_service = get_service(ChatService)() + + +class ProxyBrain(KnowledgeBrainQA): + """ + ProxyBrain class serves as a proxy to utilize various language models for generating responses. + It dynamically selects and uses the appropriate language model based on the provided context and question. + """ + + def __init__( + self, + **kwargs, + ): + """ + Initializes the ProxyBrain with the given arguments. + + Args: + **kwargs: Arbitrary keyword arguments. + """ + super().__init__( + **kwargs, + ) + + def get_chain(self): + """ + Constructs and returns the conversational chain for ProxyBrain. + + Returns: + A conversational chain object. + """ + prompt = ChatPromptTemplate.from_messages( + [ + ( + "system", + "You are Quivr. You are an assistant. 
{custom_personality}", + ), + MessagesPlaceholder(variable_name="chat_history"), + ("human", "{question}"), + ] + ) + + chain = prompt | ChatLiteLLM(model=self.model, max_tokens=self.max_tokens) + + return chain + + async def generate_stream( + self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True + ) -> AsyncIterable: + """ + Generates a stream of responses for the given question. + + Args: + chat_id (UUID): The chat session ID. + question (ChatQuestion): The question object. + save_answer (bool): Whether to save the answer. + + Yields: + AsyncIterable: A stream of response strings. + """ + conversational_qa_chain = self.get_chain() + transformed_history, streamed_chat_history = ( + self.initialize_streamed_chat_history(chat_id, question) + ) + response_tokens = [] + config = {"metadata": {"conversation_id": str(chat_id)}} + + async for chunk in conversational_qa_chain.astream( + { + "question": question.question, + "chat_history": transformed_history, + "custom_personality": ( + self.prompt_to_use.content if self.prompt_to_use else None + ), + }, + config=config, + ): + response_tokens.append(chunk.content) + streamed_chat_history.assistant = chunk.content + yield f"data: {json.dumps(streamed_chat_history.dict())}" + + self.save_answer(question, response_tokens, streamed_chat_history, save_answer) + + def generate_answer( + self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True + ) -> GetChatHistoryOutput: + """ + Generates a non-streaming answer for the given question. + + Args: + chat_id (UUID): The chat session ID. + question (ChatQuestion): The question object. + save_answer (bool): Whether to save the answer. + + Returns: + GetChatHistoryOutput: The chat history output object containing the answer. + """ + conversational_qa_chain = self.get_chain() + transformed_history, streamed_chat_history = ( + self.initialize_streamed_chat_history(chat_id, question) + ) + config = {"metadata": {"conversation_id": str(chat_id)}} + model_response = conversational_qa_chain.invoke( + { + "question": question.question, + "chat_history": transformed_history, + "custom_personality": ( + self.prompt_to_use.content if self.prompt_to_use else None + ), + }, + config=config, + ) + + answer = model_response.content + + return self.save_non_streaming_answer( + chat_id=chat_id, + question=question, + answer=answer, + ) diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Proxy/__init__.py b/backend/core/quivr_core/api/modules/brain/integrations/Proxy/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/brain/integrations/SQL/Brain.py b/backend/core/quivr_core/api/modules/brain/integrations/SQL/Brain.py new file mode 100644 index 000000000000..9559c658665b --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/integrations/SQL/Brain.py @@ -0,0 +1,104 @@ +import json +from typing import AsyncIterable +from uuid import UUID + +from langchain_community.chat_models import ChatLiteLLM +from langchain_community.utilities import SQLDatabase +from langchain_core.output_parsers import StrOutputParser +from langchain_core.prompts import ChatPromptTemplate +from langchain_core.runnables import RunnablePassthrough + +from quivr_core.api.modules.brain.integrations.SQL.SQL_connector import SQLConnector +from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA +from quivr_core.api.modules.brain.repository.integration_brains import IntegrationBrain +from quivr_core.api.modules.chat.dto.chats 
import ChatQuestion
+
+
+class SQLBrain(KnowledgeBrainQA, IntegrationBrain):
+    """This is the SQL brain class. It is a KnowledgeBrainQA that answers questions
+    by querying the SQL database configured for the brain: it calls the data store
+    internally to get the data.
+
+    Args:
+        KnowledgeBrainQA (_type_): A brain that stores the knowledge internally
+    """
+
+    uri: str = None
+    db: SQLDatabase = None
+    sql_connector: SQLConnector = None
+
+    def __init__(
+        self,
+        **kwargs,
+    ):
+        super().__init__(
+            **kwargs,
+        )
+        self.sql_connector = SQLConnector(self.brain_id, self.user_id)
+
+    def get_schema(self, _):
+        return self.db.get_table_info()
+
+    def run_query(self, query):
+        return self.db.run(query)
+
+    def get_chain(self):
+        template = """Based on the table schema below, write a SQL query that would answer the user's question:
+        {schema}
+
+        Question: {question}
+        SQL Query:"""
+        prompt = ChatPromptTemplate.from_template(template)
+
+        self.db = SQLDatabase.from_uri(self.sql_connector.credentials["uri"])
+
+        api_base = None
+        if self.brain_settings.ollama_api_base_url and self.model.startswith("ollama"):
+            api_base = self.brain_settings.ollama_api_base_url
+
+        model = ChatLiteLLM(model=self.model, api_base=api_base)
+
+        sql_response = (
+            RunnablePassthrough.assign(schema=self.get_schema)
+            | prompt
+            | model.bind(stop=["\nSQLResult:"])
+            | StrOutputParser()
+        )
+
+        template = """Based on the table schema below, question, sql query, and sql response, write a natural language response along with the query that was used to generate it:
+        {schema}
+
+        Question: {question}
+        SQL Query: {query}
+        SQL Response: {response}"""
+        prompt_response = ChatPromptTemplate.from_template(template)
+
+        full_chain = (
+            RunnablePassthrough.assign(query=sql_response).assign(
+                schema=self.get_schema,
+                response=lambda x: self.db.run(x["query"]),
+            )
+            | prompt_response
+            | model
+        )
+
+        return full_chain
+
+    async def generate_stream(
+        self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True
+    ) -> AsyncIterable:
+        conversational_qa_chain = self.get_chain()
+        transformed_history, streamed_chat_history = (
+            self.initialize_streamed_chat_history(chat_id, question)
+        )
+        response_tokens = []
+
+        async for chunk in conversational_qa_chain.astream(
+            {
+                "question": question.question,
+            }
+        ):
+            response_tokens.append(chunk.content)
+            streamed_chat_history.assistant = chunk.content
+            yield f"data: {json.dumps(streamed_chat_history.dict())}"
+
+        self.save_answer(question, response_tokens, streamed_chat_history, save_answer)
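SQLBrain's get_chain above is a two-stage text-to-SQL pipeline: stage one turns (schema, question) into a SQL query, and stage two runs that query and verbalizes the result. A minimal sketch of stage one against a throwaway in-memory SQLite database (model name and question are illustrative, and an API key is assumed):

    from langchain_community.chat_models import ChatLiteLLM
    from langchain_community.utilities import SQLDatabase
    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.runnables import RunnablePassthrough

    db = SQLDatabase.from_uri("sqlite:///:memory:")
    model = ChatLiteLLM(model="gpt-4o")

    sql_prompt = ChatPromptTemplate.from_template(
        "Based on the table schema below, write a SQL query that would answer "
        "the user's question:\n{schema}\n\nQuestion: {question}\nSQL Query:"
    )
    write_sql = (
        # Inject the live schema so the model sees real table definitions.
        RunnablePassthrough.assign(schema=lambda _: db.get_table_info())
        | sql_prompt
        | model.bind(stop=["\nSQLResult:"])  # stop before any hallucinated result
        | StrOutputParser()
    )

    print(write_sql.invoke({"question": "How many rows does each table have?"}))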
diff --git a/backend/core/quivr_core/api/modules/brain/integrations/SQL/SQL_connector.py b/backend/core/quivr_core/api/modules/brain/integrations/SQL/SQL_connector.py
new file mode 100644
index 000000000000..671cee947b64
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/brain/integrations/SQL/SQL_connector.py
@@ -0,0 +1,41 @@
+from quivr_core.api.logger import get_logger
+from quivr_core.api.modules.brain.entity.integration_brain import IntegrationEntity
+from quivr_core.api.modules.brain.repository.integration_brains import IntegrationBrain
+from quivr_core.api.modules.knowledge.repository.knowledge_interface import (
+    KnowledgeInterface,
+)
+from quivr_core.api.modules.knowledge.service.knowledge_service import KnowledgeService
+
+logger = get_logger(__name__)
+
+
+class SQLConnector(IntegrationBrain):
+    """A class to interact with an SQL database"""
+
+    credentials: dict[str, str] = None
+    integration_details: IntegrationEntity = None
+    brain_id: str = None
+    user_id: str = None
+    knowledge_service: KnowledgeInterface
+
+    def __init__(self, brain_id: str, user_id: str):
+        super().__init__()
+        self.brain_id = brain_id
+        self.user_id = user_id
+        self._load_credentials()
+        self.knowledge_service = KnowledgeService()
+
+    def _load_credentials(self) -> None:
+        """Load the SQL connection credentials"""
+        self.integration_details = self.get_integration_brain(self.brain_id)
+        if self.credentials is None:
+            logger.info("Loading SQL connection credentials")
+            self.integration_details.credentials = {
+                "uri": self.integration_details.settings.get("uri", "")
+            }
+            self.update_integration_brain(
+                self.brain_id, self.user_id, self.integration_details
+            )
+            self.credentials = self.integration_details.credentials
+        else:  # pragma: no cover
+            self.credentials = self.integration_details.credentials
diff --git a/backend/core/quivr_core/api/modules/brain/integrations/SQL/__init__.py b/backend/core/quivr_core/api/modules/brain/integrations/SQL/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Self/Brain.py b/backend/core/quivr_core/api/modules/brain/integrations/Self/Brain.py
new file mode 100644
index 000000000000..e0fed77d68ba
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/brain/integrations/Self/Brain.py
@@ -0,0 +1,487 @@
+import json
+from typing import AsyncIterable, List
+from uuid import UUID
+
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.prompts import (
+    ChatPromptTemplate,
+    MessagesPlaceholder,
+    PromptTemplate,
+)
+from langchain_core.pydantic_v1 import BaseModel as BaseModelV1
+from langchain_core.pydantic_v1 import Field as FieldV1
+from langchain_openai import ChatOpenAI
+from langgraph.graph import END, StateGraph
+from typing_extensions import TypedDict
+
+from quivr_core.api.logger import get_logger
+from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA
+from quivr_core.api.modules.chat.dto.chats import ChatQuestion
+from quivr_core.api.modules.chat.dto.outputs import GetChatHistoryOutput
+from quivr_core.api.modules.chat.service.chat_service import ChatService
+from quivr_core.api.modules.dependencies import get_service
+
+
+# Post-processing
+def format_docs(docs):
+    return "\n\n".join(doc.page_content for doc in docs)
+
+
+class GraphState(TypedDict):
+    """
+    Represents the state of our graph.
+
+    Attributes:
+        question: question
+        generation: LLM generation
+        documents: list of documents
+    """
+
+    question: str
+    generation: str
+    documents: List[str]
+
+
+# Data model
+class GradeDocuments(BaseModelV1):
+    """Binary score for relevance check on retrieved documents."""
+
+    binary_score: str = FieldV1(
+        description="Documents are relevant to the question, 'yes' or 'no'"
+    )
+
+
+class GradeHallucinations(BaseModelV1):
+    """Binary score for hallucination present in generation answer."""
+
+    binary_score: str = FieldV1(
+        description="Answer is grounded in the facts, 'yes' or 'no'"
+    )
+
+
+# Data model
+class GradeAnswer(BaseModelV1):
+    """Binary score to assess answer addresses question."""
+
+    binary_score: str = FieldV1(
+        description="Answer addresses the question, 'yes' or 'no'"
+    )
+
+
+logger = get_logger(__name__)
+
+chat_service = get_service(ChatService)()
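The three grade models above are used as structured outputs: each grader LLM is constrained to emit nothing but a binary score. A minimal sketch of that pattern with a locally redefined model (the document and question are invented, and an OpenAI key is assumed):

    from langchain_core.pydantic_v1 import BaseModel, Field
    from langchain_openai import ChatOpenAI

    class GradeDocuments(BaseModel):
        """Binary score for relevance check on retrieved documents."""

        binary_score: str = Field(
            description="Documents are relevant to the question, 'yes' or 'no'"
        )

    grader = ChatOpenAI(model="gpt-4o", temperature=0).with_structured_output(
        GradeDocuments
    )
    result = grader.invoke(
        "Retrieved document:\n\nQuivr is an open-source RAG platform.\n\n"
        "User question: What is Quivr?"
    )
    print(result.binary_score)  # "yes" or "no"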
+class SelfBrain(KnowledgeBrainQA):
+    """
+    SelfBrain implements a self-reflective RAG workflow on top of KnowledgeBrainQA:
+    retrieved documents are graded for relevance, generated answers are checked
+    both for hallucinations and for actually addressing the question, and the
+    query is rewritten and retried when retrieval comes up short.
+    """
+
+    max_input: int = 10000
+
+    def __init__(
+        self,
+        **kwargs,
+    ):
+        super().__init__(
+            **kwargs,
+        )
+
+    def calculate_pricing(self):
+        return 3
+
+    def retrieval_grade(self):
+        llm = ChatOpenAI(model="gpt-4o", temperature=0)
+        structured_llm_grader = llm.with_structured_output(GradeDocuments)
+
+        # Prompt
+        system = """You are a grader assessing relevance of a retrieved document to a user question. \n
+        It does not need to be a stringent test. The goal is to filter out erroneous retrievals. \n
+        If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant. \n
+        Give a binary score 'yes' or 'no' to indicate whether the document is relevant to the question."""
+        grade_prompt = ChatPromptTemplate.from_messages(
+            [
+                ("system", system),
+                (
+                    "human",
+                    "Retrieved document: \n\n {document} \n\n User question: {question}",
+                ),
+            ]
+        )
+
+        retrieval_grader = grade_prompt | structured_llm_grader
+
+        return retrieval_grader
+
+    def generation_rag(self):
+        # Prompt
+        human_prompt = """You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.
+
+        Question: {question}
+
+        Context: {context}
+
+        Answer:
+        """
+        prompt_human = PromptTemplate.from_template(human_prompt)
+        # LLM
+        llm = ChatOpenAI(model="gpt-4o", temperature=0)
+
+        # Chain
+        rag_chain = prompt_human | llm | StrOutputParser()
+
+        return rag_chain
+
+    def hallucination_grader(self):
+        # LLM with function call
+        llm = ChatOpenAI(model="gpt-4o", temperature=0)
+        structured_llm_grader = llm.with_structured_output(GradeHallucinations)
+
+        # Prompt
+        system = """You are a grader assessing whether an LLM generation is grounded in / supported by a set of retrieved facts. \n
+        Give a binary score 'yes' or 'no'. 'Yes' means that the answer is grounded in / supported by the set of facts."""
+        hallucination_prompt = ChatPromptTemplate.from_messages(
+            [
+                ("system", system),
+                (
+                    "human",
+                    "Set of facts: \n\n {documents} \n\n LLM generation: {generation}",
+                ),
+            ]
+        )
+
+        hallucination_grader = hallucination_prompt | structured_llm_grader
+
+        return hallucination_grader
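These grader factories are wired into a LangGraph state machine in create_graph further down: conditional edges route between generation, query rewriting, and END based on the grades. A minimal, runnable sketch of that conditional wiring with stubbed nodes (no LLM calls, so it executes as-is):

    from typing import TypedDict

    from langgraph.graph import END, StateGraph

    class State(TypedDict):
        question: str
        generation: str

    def generate(state: State) -> State:
        # Stub: the real node would invoke the RAG chain.
        return {"question": state["question"], "generation": "draft answer"}

    def grade(state: State) -> str:
        # Stub decision: the real graders check grounding and usefulness.
        return "useful" if state["generation"] else "not useful"

    workflow = StateGraph(State)
    workflow.add_node("generate", generate)
    workflow.set_entry_point("generate")
    workflow.add_conditional_edges(
        "generate", grade, {"useful": END, "not useful": "generate"}
    )
    app = workflow.compile()
    print(app.invoke({"question": "What is Quivr?", "generation": ""}))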
+    def answer_grader(self):
+        # LLM with function call
+        llm = ChatOpenAI(model="gpt-4o", temperature=0)
+        structured_llm_grader = llm.with_structured_output(GradeAnswer)
+
+        # Prompt
+        system = """You are a grader assessing whether an answer addresses / resolves a question \n
+        Give a binary score 'yes' or 'no'. 'Yes' means that the answer resolves the question."""
+        answer_prompt = ChatPromptTemplate.from_messages(
+            [
+                ("system", system),
+                (
+                    "human",
+                    "User question: \n\n {question} \n\n LLM generation: {generation}",
+                ),
+            ]
+        )
+
+        answer_grader = answer_prompt | structured_llm_grader
+
+        return answer_grader
+
+    def question_rewriter(self):
+        # LLM
+        llm = ChatOpenAI(model="gpt-4o", temperature=0)
+
+        # Prompt
+        system = """You are a question re-writer that converts an input question to a better version that is optimized \n
+        for vectorstore retrieval. Look at the input and try to reason about the underlying semantic intent / meaning."""
+        re_write_prompt = ChatPromptTemplate.from_messages(
+            [
+                ("system", system),
+                (
+                    "human",
+                    "Here is the initial question: \n\n {question} \n Formulate an improved question.",
+                ),
+            ]
+        )
+
+        question_rewriter = re_write_prompt | llm | StrOutputParser()
+
+        return question_rewriter
+
+    def get_chain(self):
+        graph = self.create_graph()
+
+        return graph
+
+    def create_graph(self):
+        workflow = StateGraph(GraphState)
+
+        # Define the nodes
+        workflow.add_node("retrieve", self.retrieve)  # retrieve
+        workflow.add_node("grade_documents", self.grade_documents)  # grade documents
+        workflow.add_node("generate", self.generate)  # generate
+        workflow.add_node("transform_query", self.transform_query)  # transform_query
+
+        # Build graph
+        workflow.set_entry_point("retrieve")
+        workflow.add_edge("retrieve", "grade_documents")
+        workflow.add_conditional_edges(
+            "grade_documents",
+            self.decide_to_generate,
+            {
+                "transform_query": "transform_query",
+                "generate": "generate",
+            },
+        )
+        workflow.add_edge("transform_query", "retrieve")
+        workflow.add_conditional_edges(
+            "generate",
+            self.grade_generation_v_documents_and_question,
+            {
+                "not supported": "generate",
+                "useful": END,
+                "not useful": "transform_query",
+            },
+        )
+
+        # Compile
+        app = workflow.compile()
+        return app
+
+    def retrieve(self, state):
+        """
+        Retrieve documents
+
+        Args:
+            state (dict): The current graph state
+
+        Returns:
+            state (dict): New key added to state, documents, that contains retrieved documents
+        """
+        print("---RETRIEVE---")
+        logger.info("Retrieving documents")
+        question = state["question"]
+        logger.info(f"Question: {question}")
+
+        # Retrieval
+        retriever = self.knowledge_qa.get_retriever()
+        documents = retriever.get_relevant_documents(question)
+        return {"documents": documents, "question": question}
+
+    def generate(self, state):
+        """
+        Generate answer
+
+        Args:
+            state (dict): The current graph state
+
+        Returns:
+            state (dict): New key added to state, generation, that contains LLM generation
+        """
+        print("---GENERATE---")
+        question = state["question"]
+        documents = state["documents"]
+
+        formatted_docs = format_docs(documents)
+        # RAG generation
+        generation = self.generation_rag().invoke(
+            {"context": formatted_docs, "question": question}
+        )
+        return {"documents": documents, "question": question, "generation": generation}
+
+    def grade_documents(self, state):
+        """
+        Determines whether the retrieved documents are relevant to the question.
+
+        Args:
+            state (dict): The current graph state
+
+        Returns:
+            state (dict): Updates documents key with only filtered relevant documents
+        """
+
+        print("---CHECK DOCUMENT RELEVANCE TO QUESTION---")
+        question = state["question"]
+        documents = state["documents"]
+
+        # Score each doc
+        filtered_docs = []
+        for d in documents:
+            score = self.retrieval_grade().invoke(
+                {"question": question, "document": d.page_content}
+            )
+            grade = score.binary_score
+            if grade == "yes":
+                print("---GRADE: DOCUMENT RELEVANT---")
+                filtered_docs.append(d)
+            else:
+                print("---GRADE: DOCUMENT NOT RELEVANT---")
+                continue
+        return {"documents": filtered_docs, "question": question}
+
+    def transform_query(self, state):
+        """
+        Transform the query to produce a better question.
+
+        Args:
+            state (dict): The current graph state
+
+        Returns:
+            state (dict): Updates question key with a re-phrased question
+        """
+
+        print("---TRANSFORM QUERY---")
+        question = state["question"]
+        documents = state["documents"]
+
+        # Re-write question
+        better_question = self.question_rewriter().invoke({"question": question})
+        return {"documents": documents, "question": better_question}
+
+    def decide_to_generate(self, state):
+        """
+        Determines whether to generate an answer, or re-generate a question.
+
+        Args:
+            state (dict): The current graph state
+
+        Returns:
+            str: Binary decision for next node to call
+        """
+
+        print("---ASSESS GRADED DOCUMENTS---")
+        question = state["question"]
+        filtered_documents = state["documents"]
+
+        if not filtered_documents:
+            # All documents were filtered out by the relevance check,
+            # so we re-generate a new query
+            print(
+                "---DECISION: ALL DOCUMENTS ARE NOT RELEVANT TO QUESTION, TRANSFORM QUERY---"
+            )
+            return "transform_query"
+        else:
+            # We have relevant documents, so generate answer
+            print("---DECISION: GENERATE---")
+            return "generate"
+
+    def grade_generation_v_documents_and_question(self, state):
+        """
+        Determines whether the generation is grounded in the documents and answers the question.
+
+        Args:
+            state (dict): The current graph state
+
+        Returns:
+            str: Decision for next node to call
+        """
+
+        print("---CHECK HALLUCINATIONS---")
+        question = state["question"]
+        documents = state["documents"]
+        generation = state["generation"]
+
+        score = self.hallucination_grader().invoke(
+            {"documents": documents, "generation": generation}
+        )
+        grade = score.binary_score
+
+        # Check hallucination
+        if grade == "yes":
+            print("---DECISION: GENERATION IS GROUNDED IN DOCUMENTS---")
+            # Check question-answering
+            print("---GRADE GENERATION vs QUESTION---")
+            score = self.answer_grader().invoke(
+                {"question": question, "generation": generation}
+            )
+            grade = score.binary_score
+            if grade == "yes":
+                print("---DECISION: GENERATION ADDRESSES QUESTION---")
+                return "useful"
+            else:
+                print("---DECISION: GENERATION DOES NOT ADDRESS QUESTION---")
+                return "not useful"
+        else:
+            print("---DECISION: GENERATION IS NOT GROUNDED IN DOCUMENTS, RE-TRY---")
+            return "not supported"
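For context, the graph wiring above follows LangGraph's conditional-edge pattern. A minimal, runnable sketch of that pattern in isolation (the `ToyState` schema, the stub nodes, and the hard-coded grader below are illustrative stand-ins, not part of this module):

```python
from typing import TypedDict

from langgraph.graph import END, StateGraph


class ToyState(TypedDict):
    question: str
    generation: str


def retrieve(state: ToyState) -> dict:
    # Stand-in for vector-store retrieval; returns a partial state update.
    return {"generation": ""}


def generate(state: ToyState) -> dict:
    return {"generation": f"answer to: {state['question']}"}


workflow = StateGraph(ToyState)
workflow.add_node("retrieve", retrieve)
workflow.add_node("generate", generate)
workflow.set_entry_point("retrieve")
workflow.add_edge("retrieve", "generate")
# A conditional edge maps the router's return value to the next node.
workflow.add_conditional_edges(
    "generate",
    lambda state: "useful",  # stand-in for the hallucination/answer graders
    {"useful": END, "not useful": "retrieve"},
)
app = workflow.compile()
print(app.invoke({"question": "ping"})["generation"])  # -> "answer to: ping"
```

The real graph above adds the `grade_documents`/`transform_query` loop, but the mechanics are the same: each node returns a partial state update, and the string returned by the router selects the outgoing edge.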
+
+    async def generate_stream(
+        self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True
+    ) -> AsyncIterable:
+        conversational_qa_chain = self.get_chain()
+        transformed_history, streamed_chat_history = (
+            self.initialize_streamed_chat_history(chat_id, question)
+        )
+        filtered_history = self.filter_history(transformed_history, 40, 2000)
+        response_tokens = []
+        config = {"metadata": {"conversation_id": str(chat_id)}}
+
+        prompt = ChatPromptTemplate.from_messages(
+            [
+                (
+                    "system",
+                    "You are GPT-4 powered by Quivr. You are an assistant. {custom_personality}",
+                ),
+                MessagesPlaceholder(variable_name="chat_history"),
+                ("human", "{question}"),
+            ]
+        )
+        prompt_formatted = prompt.format_messages(
+            chat_history=filtered_history,
+            question=question.question,
+            custom_personality=(
+                self.prompt_to_use.content if self.prompt_to_use else None
+            ),
+        )
+
+        async for event in conversational_qa_chain.astream(
+            {"question": question.question}, config=config
+        ):
+            for key, value in event.items():
+                if "generation" in value and value["generation"] != "":
+                    response_tokens.append(value["generation"])
+                    streamed_chat_history.assistant = value["generation"]
+
+                    yield f"data: {json.dumps(streamed_chat_history.dict())}"
+
+        self.save_answer(question, response_tokens, streamed_chat_history, save_answer)
+
+    def generate_answer(
+        self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True
+    ) -> GetChatHistoryOutput:
+        conversational_qa_chain = self.get_chain()
+        transformed_history, _ = self.initialize_streamed_chat_history(
+            chat_id, question
+        )
+        filtered_history = self.filter_history(transformed_history, 40, 2000)
+        config = {"metadata": {"conversation_id": str(chat_id)}}
+
+        prompt = ChatPromptTemplate.from_messages(
+            [
+                (
+                    "system",
+                    "You are GPT-4 powered by Quivr. You are an assistant. {custom_personality}",
+                ),
+                MessagesPlaceholder(variable_name="chat_history"),
+                ("human", "{question}"),
+            ]
+        )
+        prompt_formatted = prompt.format_messages(
+            chat_history=filtered_history,
+            question=question.question,
+            custom_personality=(
+                self.prompt_to_use.content if self.prompt_to_use else None
+            ),
+        )
+        model_response = conversational_qa_chain.invoke(
+            {"messages": prompt_formatted},
+            config=config,
+        )
+
+        answer = model_response["messages"][-1].content
+
+        return self.save_non_streaming_answer(
+            chat_id=chat_id, question=question, answer=answer, metadata={}
+        )
diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Self/__init__.py b/backend/core/quivr_core/api/modules/brain/integrations/Self/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/backend/core/quivr_core/api/modules/brain/integrations/__init__.py b/backend/core/quivr_core/api/modules/brain/integrations/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/backend/core/quivr_core/api/modules/brain/knowledge_brain_qa.py b/backend/core/quivr_core/api/modules/brain/knowledge_brain_qa.py
new file mode 100644
index 000000000000..556677abbb4d
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/brain/knowledge_brain_qa.py
@@ -0,0 +1,513 @@
+import json
+from typing import AsyncIterable, List, Optional
+from uuid import UUID
+
+from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
+from pydantic import BaseModel, ConfigDict
+from pydantic_settings import BaseSettings
+
+from quivr_core.api.logger import get_logger
+from quivr_core.api.models.settings import BrainSettings
+from quivr_core.api.modules.brain.entity.brain_entity import BrainEntity
+from quivr_core.api.modules.brain.qa_interface import (
+    QAInterface,
+    model_compatible_with_function_calling,
+)
+from quivr_core.api.modules.brain.rags.quivr_rag import QuivrRAG
+from quivr_core.api.modules.brain.rags.rag_interface import RAGInterface
+from quivr_core.api.modules.brain.service.brain_service import BrainService
+from quivr_core.api.modules.brain.service.utils.format_chat_history import (
+    format_chat_history,
+)
+from quivr_core.api.modules.brain.service.utils.get_prompt_to_use_id import (
+    get_prompt_to_use_id,
+)
+from quivr_core.api.modules.chat.controller.chat.utils import (
+    find_model_and_generate_metadata,
+    update_user_usage,
+)
+from quivr_core.api.modules.chat.dto.chats import ChatQuestion, Sources
+from quivr_core.api.modules.chat.dto.inputs import CreateChatHistory
+from quivr_core.api.modules.chat.dto.outputs import GetChatHistoryOutput
+from quivr_core.api.modules.chat.service.chat_service import ChatService
+from quivr_core.api.modules.prompt.service.get_prompt_to_use import get_prompt_to_use
+from quivr_core.api.modules.upload.service.generate_file_signed_url import (
+    generate_file_signed_url,
+)
+from quivr_core.api.modules.user.service.user_usage import UserUsage
+
+logger = get_logger(__name__)
+QUIVR_DEFAULT_PROMPT = "Your name is Quivr. You're a helpful assistant. If you don't know the answer, just say that you don't know, don't try to make up an answer."
+
+brain_service = BrainService()
+
+
+def is_valid_uuid(uuid_to_test, version=4):
+    try:
+        uuid_obj = UUID(uuid_to_test, version=version)
+    except ValueError:
+        return False
+
+    return str(uuid_obj) == uuid_to_test
+
+
+def generate_source(
+    source_documents,
+    brain_id: UUID,
+    citations: List[int] | None = None,
+):
+    """
+    Generate the list of sources for the answer.
+    Takes a list of source documents, plus citations that point to the indices of the documents actually used in the answer.
+    """
+    # Initialize an empty list for sources
+    sources_list: List[Sources] = []
+
+    # Initialize a dictionary for storing generated URLs
+    generated_urls = {}
+
+    # remove duplicate sources with same name and create a list of unique sources
+    sources_url_cache = {}
+
+    # If source documents exist
+    if source_documents:
+        logger.info(f"Citations {citations}")
+        # Iterate over each document
+        for index, doc in enumerate(source_documents):
+            logger.info(f"Processing source document {doc.metadata['file_name']}")
+            if citations is not None:
+                if index not in citations:
+                    logger.info(f"Skipping source document {doc.metadata['file_name']}")
+                    continue
+            # Check if 'url' is in the document metadata
+            is_url = (
+                "original_file_name" in doc.metadata
+                and doc.metadata["original_file_name"] is not None
+                and doc.metadata["original_file_name"].startswith("http")
+            )
+
+            # Determine the name based on whether it's a URL or a file
+            name = (
+                doc.metadata["original_file_name"]
+                if is_url
+                else doc.metadata["file_name"]
+            )
+
+            # Determine the type based on whether it's a URL or a file
+            type_ = "url" if is_url else "file"
+
+            # Determine the source URL based on whether it's a URL or a file
+            if is_url:
+                source_url = doc.metadata["original_file_name"]
+            else:
+                file_path = f"{brain_id}/{doc.metadata['file_name']}"
+                # Check if the URL has already been generated
+                if file_path in generated_urls:
+                    source_url = generated_urls[file_path]
+                else:
+                    # Generate the URL
+                    if file_path in sources_url_cache:
+                        source_url = sources_url_cache[file_path]
+                    else:
+                        generated_url = generate_file_signed_url(file_path)
+                        if generated_url is not None:
+                            source_url = generated_url.get("signedURL", "")
+                        else:
+                            source_url = ""
+                    # Store the generated URL
+                    generated_urls[file_path] = source_url
+
+            # Append a new Sources object to the list
+            sources_list.append(
+                Sources(
+                    name=name,
+                    type=type_,
+                    source_url=source_url,
+                    original_file_name=name,
+                    citation=doc.page_content,
+                )
+            )
+    else:
+        logger.info("No source documents found or source_documents is not a list.")
+    return sources_list
+
+
+class KnowledgeBrainQA(BaseModel, QAInterface):
+    """
+    Main class for the Brain Picking functionality.
+    It initializes a chat model and retrieves answers using a ConversationalRetrievalChain.
+    It has two main methods: `generate_answer` and `generate_stream`.
+    One produces the answer in a single request, the other streams it message by message.
+    Both behave the same apart from streaming and share the same prompt template.
+    """
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    # Instantiate settings
+    brain_settings: BaseSettings = BrainSettings()
+
+    # TODO: remove this !!!!! Only added for compatibility
+    chat_service: ChatService
+
+    # Default class attributes
+    model: str = "gpt-3.5-turbo-0125"  # pyright: ignore reportPrivateUsage=none
+    temperature: float = 0.1
+    chat_id: str = None  # pyright: ignore reportPrivateUsage=none
+    brain_id: str = None  # pyright: ignore reportPrivateUsage=none
+    max_tokens: int = 2000
+    max_input: int = 2000
+    streaming: bool = False
+    knowledge_qa: Optional[RAGInterface] = None
+    brain: Optional[BrainEntity] = None
+    user_id: str = None
+    user_email: str = None
+    user_usage: Optional[UserUsage] = None
+    user_settings: Optional[dict] = None
+    models_settings: Optional[List[dict]] = None
+    metadata: Optional[dict] = None
+
+    callbacks: List[AsyncIteratorCallbackHandler] = None  # pyright: ignore reportPrivateUsage=none
+
+    prompt_id: Optional[UUID] = None
+
+    def __init__(
+        self,
+        brain_id: str,
+        chat_id: str,
+        chat_service: ChatService,
+        user_id: str = None,
+        user_email: str = None,
+        streaming: bool = False,
+        prompt_id: Optional[UUID] = None,
+        metadata: Optional[dict] = None,
+        cost: int = 100,
+        **kwargs,
+    ):
+        super().__init__(
+            brain_id=brain_id,
+            chat_id=chat_id,
+            chat_service=chat_service,
+            streaming=streaming,
+            **kwargs,
+        )
+        self.chat_service = chat_service
+        self.prompt_id = prompt_id
+        self.user_id = user_id
+        self.user_email = user_email
+        self.user_usage = UserUsage(id=user_id, email=user_email)
+        # TODO: we already have a brain before !!!
+        self.brain = brain_service.get_brain_by_id(brain_id)
+        self.user_settings = self.user_usage.get_user_settings()
+
+        # Get Model settings for the user
+        self.models_settings = self.user_usage.get_models()
+        self.increase_usage_user()
+        self.knowledge_qa = QuivrRAG(
+            model=self.brain.model if self.brain.model else self.model,
+            brain_id=brain_id,
+            chat_id=chat_id,
+            streaming=streaming,
+            max_input=self.max_input,
+            max_tokens=self.max_tokens,
+            **kwargs,
+        )  # type: ignore
+
+    @property
+    def prompt_to_use(self):
+        if self.brain_id and is_valid_uuid(self.brain_id):
+            return get_prompt_to_use(UUID(self.brain_id), self.prompt_id)
+        else:
+            return None
+
+    @property
+    def prompt_to_use_id(self) -> Optional[UUID]:
+        # TODO: move to prompt service or instruction or something
+        if self.brain_id and is_valid_uuid(self.brain_id):
+            return get_prompt_to_use_id(UUID(self.brain_id), self.prompt_id)
+        else:
+            return None
+
+    def filter_history(
+        self, chat_history, max_history: int = 10, max_tokens: int = 2000
+    ):
+        """
+        Filter the chat history down to the most recent messages that fit the given limits.
+
+        Takes in a chat_history, e.g. [HumanMessage(content='Qui est Chloé ?'), AIMessage(content="Chloé est une salariée travaillant pour l'entreprise Quivr en tant qu'AI Engineer, sous la direction de son supérieur hiérarchique, Stanislas Girard."), HumanMessage(content='Dis moi en plus sur elle'), AIMessage(content=''), HumanMessage(content='Dis moi en plus sur elle'), AIMessage(content="Désolé, je n'ai pas d'autres informations sur Chloé à partir des fichiers fournis.")]
+        Returns a filtered chat_history, applying the max_tokens budget first, then max_history, where a human message and an AI message count as one pair.
+        A token is approximated as 4 characters.
+        """
+        chat_history = chat_history[::-1]
+        total_tokens = 0
+        total_pairs = 0
+        filtered_chat_history = []
+        for i in range(0, len(chat_history), 2):
+            if i + 1 < len(chat_history):
+                human_message = chat_history[i]
+                ai_message = chat_history[i + 1]
+                message_tokens = (
+                    len(human_message.content) + len(ai_message.content)
+                ) // 4
+                if (
+                    total_tokens + message_tokens > max_tokens
+                    or total_pairs >= max_history
+                ):
+                    break
+                filtered_chat_history.append(human_message)
+                filtered_chat_history.append(ai_message)
+                total_tokens += message_tokens
+                total_pairs += 1
+        chat_history = filtered_chat_history[::-1]
+
+        return chat_history
+
+    def increase_usage_user(self):
+        # Raises an error if the user has consumed all of his credits
+
+        update_user_usage(
+            usage=self.user_usage,
+            user_settings=self.user_settings,
+            cost=self.calculate_pricing(),
+        )
+
+    def calculate_pricing(self):
+        model_to_use = find_model_and_generate_metadata(
+            self.brain.model,
+            self.user_settings,
+            self.models_settings,
+        )
+        self.model = model_to_use.name
+        self.max_input = model_to_use.max_input
+        self.max_tokens = model_to_use.max_output
+        user_chosen_model_price = 1000
+
+        for model_setting in self.models_settings:
+            if model_setting["name"] == self.model:
+                user_chosen_model_price = model_setting["price"]
+
+        return user_chosen_model_price
+
+    # TODO: deprecated
+    async def generate_answer(
+        self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True
+    ) -> GetChatHistoryOutput:
+        conversational_qa_chain = self.knowledge_qa.get_chain()
+        transformed_history, _ = await self.initialize_streamed_chat_history(
+            chat_id, question
+        )
+        metadata = self.metadata or {}
+        citations = None
+        answer = ""
+        config = {"metadata": {"conversation_id": str(chat_id)}}
+
+        model_response = conversational_qa_chain.invoke(
+            {
+                "question": question.question,
+                "chat_history": transformed_history,
+                "custom_personality": (
+                    self.prompt_to_use.content if self.prompt_to_use else None
+                ),
+            },
+            config=config,
+        )
+
+        if model_compatible_with_function_calling(model=self.model):
+            if model_response["answer"].tool_calls:
+                citations = model_response["answer"].tool_calls[-1]["args"]["citations"]
+                followup_questions = model_response["answer"].tool_calls[-1]["args"][
+                    "followup_questions"
+                ]
+                thoughts = model_response["answer"].tool_calls[-1]["args"]["thoughts"]
+                if followup_questions:
+                    metadata["followup_questions"] = followup_questions
+                if thoughts:
+                    metadata["thoughts"] = thoughts
+                answer = model_response["answer"].tool_calls[-1]["args"]["answer"]
+            else:
+                answer = model_response["answer"].content
+
+        sources = model_response["docs"] or []
+
+        if len(sources) > 0:
+            sources_list = generate_source(sources, self.brain_id, citations=citations)
+            serialized_sources_list = [source.dict() for source in sources_list]
+            metadata["sources"] = serialized_sources_list
+
+        return
self.save_non_streaming_answer( + chat_id=chat_id, question=question, answer=answer, metadata=metadata + ) + + async def generate_stream( + self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True + ) -> AsyncIterable: + if hasattr(self, "get_chain") and callable(self.get_chain): + conversational_qa_chain = self.get_chain() + else: + conversational_qa_chain = self.knowledge_qa.get_chain() + ( + transformed_history, + streamed_chat_history, + ) = await self.initialize_streamed_chat_history(chat_id, question) + response_tokens = "" + sources = [] + citations = [] + first = True + config = {"metadata": {"conversation_id": str(chat_id)}} + + async for chunk in conversational_qa_chain.astream( + { + "question": question.question, + "chat_history": transformed_history, + "custom_personality": ( + self.prompt_to_use.content if self.prompt_to_use else None + ), + }, + config=config, + ): + if not streamed_chat_history.metadata: + streamed_chat_history.metadata = {} + if model_compatible_with_function_calling(model=self.model): + if chunk.get("answer"): + if first: + gathered = chunk["answer"] + first = False + else: + gathered = gathered + chunk["answer"] + if ( + gathered.tool_calls + and gathered.tool_calls[-1].get("args") + and "answer" in gathered.tool_calls[-1]["args"] + ): + # Only send the difference between answer and response_tokens which was the previous answer + answer = gathered.tool_calls[-1]["args"]["answer"] + difference = answer[len(response_tokens) :] + streamed_chat_history.assistant = difference + response_tokens = answer + + yield f"data: {json.dumps(streamed_chat_history.dict())}" + if ( + gathered.tool_calls + and gathered.tool_calls[-1].get("args") + and "citations" in gathered.tool_calls[-1]["args"] + ): + citations = gathered.tool_calls[-1]["args"]["citations"] + if ( + gathered.tool_calls + and gathered.tool_calls[-1].get("args") + and "followup_questions" in gathered.tool_calls[-1]["args"] + ): + followup_questions = gathered.tool_calls[-1]["args"][ + "followup_questions" + ] + streamed_chat_history.metadata["followup_questions"] = ( + followup_questions + ) + if ( + gathered.tool_calls + and gathered.tool_calls[-1].get("args") + and "thoughts" in gathered.tool_calls[-1]["args"] + ): + thoughts = gathered.tool_calls[-1]["args"]["thoughts"] + streamed_chat_history.metadata["thoughts"] = thoughts + else: + if chunk.get("answer"): + response_tokens += chunk["answer"].content + streamed_chat_history.assistant = chunk["answer"].content + yield f"data: {streamed_chat_history.model_dump_json()}" + + if chunk.get("docs"): + sources = chunk["docs"] + + sources_list = generate_source(sources, self.brain_id, citations) + + # Serialize the sources list + serialized_sources_list = [source.dict() for source in sources_list] + streamed_chat_history.metadata["sources"] = serialized_sources_list + yield f"data: {streamed_chat_history.model_dump_json()}" + self.save_answer(question, response_tokens, streamed_chat_history, save_answer) + + async def initialize_streamed_chat_history(self, chat_id, question): + history = await self.chat_service.get_chat_history(self.chat_id) + transformed_history = format_chat_history(history) + brain = brain_service.get_brain_by_id(self.brain_id) + + streamed_chat_history = self.chat_service.update_chat_history( + CreateChatHistory( + **{ + "chat_id": chat_id, + "user_message": question.question, + "assistant": "", + "brain_id": brain.brain_id, + "prompt_id": self.prompt_to_use_id, + } + ) + ) + + streamed_chat_history = GetChatHistoryOutput( + 
+            **{
+                "chat_id": str(chat_id),
+                "message_id": streamed_chat_history.message_id,
+                "message_time": streamed_chat_history.message_time,
+                "user_message": question.question,
+                "assistant": "",
+                "prompt_title": (
+                    self.prompt_to_use.title if self.prompt_to_use else None
+                ),
+                "brain_name": brain.name if brain else None,
+                "brain_id": str(brain.brain_id) if brain else None,
+                "metadata": self.metadata,
+            }
+        )
+
+        return transformed_history, streamed_chat_history
+
+    def save_answer(
+        self, question, response_tokens, streamed_chat_history, save_answer
+    ):
+        assistant = "".join(response_tokens)
+
+        try:
+            if save_answer:
+                self.chat_service.update_message_by_id(
+                    message_id=str(streamed_chat_history.message_id),
+                    user_message=question.question,
+                    assistant=assistant,
+                    metadata=streamed_chat_history.metadata,
+                )
+        except Exception as e:
+            logger.error("Error updating message by ID: %s", e)
+
+    def save_non_streaming_answer(self, chat_id, question, answer, metadata):
+        new_chat = self.chat_service.update_chat_history(
+            CreateChatHistory(
+                **{
+                    "chat_id": chat_id,
+                    "user_message": question.question,
+                    "assistant": answer,
+                    "brain_id": self.brain.brain_id,
+                    "prompt_id": self.prompt_to_use_id,
+                    "metadata": metadata,
+                }
+            )
+        )
+
+        return GetChatHistoryOutput(
+            **{
+                "chat_id": chat_id,
+                "user_message": question.question,
+                "assistant": answer,
+                "message_time": new_chat.message_time,
+                "prompt_title": (
+                    self.prompt_to_use.title if self.prompt_to_use else None
+                ),
+                "brain_name": self.brain.name if self.brain else None,
+                "message_id": new_chat.message_id,
+                "brain_id": str(self.brain.brain_id) if self.brain else None,
+                "metadata": metadata,
+            }
+        )
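Since `generate_stream` yields Server-Sent-Events-style frames (`data: <json>`), a consumer drains it roughly as below. This is a hypothetical wiring sketch: in the app the instances come from the chat controller, and the argument values here are placeholders.

```python
import asyncio


async def consume(qa: KnowledgeBrainQA, chat_id, question) -> None:
    # qa, chat_id and question are assumed to be built by the chat controller.
    async for frame in qa.generate_stream(chat_id, question, save_answer=False):
        payload = frame.removeprefix("data: ")  # each frame is 'data: {...json...}'
        print(payload)


# asyncio.run(consume(qa, chat_id, question))  # with real instances
```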
diff --git a/backend/core/quivr_core/api/modules/brain/qa_headless.py b/backend/core/quivr_core/api/modules/brain/qa_headless.py
new file mode 100644
index 000000000000..0ea9cad03681
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/brain/qa_headless.py
@@ -0,0 +1,270 @@
+import asyncio
+import json
+from typing import AsyncIterable, Awaitable, List, Optional
+from uuid import UUID
+
+from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
+from langchain.chains import LLMChain
+from langchain.chat_models.base import BaseChatModel
+from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
+from langchain_community.chat_models import ChatLiteLLM
+from pydantic import BaseModel, ConfigDict
+
+from quivr_core.api.logger import get_logger
+from quivr_core.api.models.settings import (
+    BrainSettings,
+)
+
+# Importing settings related to the 'brain'
+from quivr_core.api.modules.brain.qa_interface import QAInterface
+from quivr_core.api.modules.brain.service.utils.format_chat_history import (
+    format_chat_history,
+    format_history_to_openai_mesages,
+)
+from quivr_core.api.modules.brain.service.utils.get_prompt_to_use_id import (
+    get_prompt_to_use_id,
+)
+from quivr_core.api.modules.chat.dto.chats import ChatQuestion
+from quivr_core.api.modules.chat.dto.inputs import CreateChatHistory
+from quivr_core.api.modules.chat.dto.outputs import GetChatHistoryOutput
+from quivr_core.api.modules.chat.service.chat_service import ChatService
+from quivr_core.api.modules.dependencies import get_service
+from quivr_core.api.modules.prompt.service.get_prompt_to_use import get_prompt_to_use
+
+logger = get_logger(__name__)
+SYSTEM_MESSAGE = "Your name is Quivr. You're a helpful assistant. If you don't know the answer, just say that you don't know, don't try to make up an answer. When answering, use markdown or any other technique to display the content in a nice and readable way."
+chat_service = get_service(ChatService)()
+
+
+class HeadlessQA(BaseModel, QAInterface):
+    brain_settings = BrainSettings()
+    model: str
+    temperature: float = 0.0
+    max_tokens: int = 2000
+    streaming: bool = False
+    chat_id: str
+    callbacks: Optional[List[AsyncIteratorCallbackHandler]] = None
+    prompt_id: Optional[UUID] = None
+
+    def _determine_streaming(self, streaming: bool) -> bool:
+        """Return whether streaming should be enabled (currently the flag is passed through unchanged)."""
+        return streaming
+
+    def _determine_callback_array(
+        self, streaming
+    ) -> List[AsyncIteratorCallbackHandler]:
+        """If streaming is set, set the AsyncIteratorCallbackHandler as the only callback."""
+        if streaming:
+            return [AsyncIteratorCallbackHandler()]
+        else:
+            return []
+
+    def __init__(self, **data):
+        super().__init__(**data)
+        self.streaming = self._determine_streaming(self.streaming)
+        self.callbacks = self._determine_callback_array(self.streaming)
+
+    @property
+    def prompt_to_use(self) -> str:
+        return get_prompt_to_use(None, self.prompt_id)
+
+    @property
+    def prompt_to_use_id(self) -> Optional[UUID]:
+        return get_prompt_to_use_id(None, self.prompt_id)
+
+    def _create_llm(
+        self,
+        model,
+        temperature=0,
+        streaming=False,
+        callbacks=None,
+    ) -> BaseChatModel:
+        """
+        Determine the language model to be used.
+        :param model: Language model name to be used.
+        :param streaming: Whether to enable streaming of the model
+        :param callbacks: Callbacks to be used for streaming
+        :return: Language model instance
+        """
+        api_base = None
+        if self.brain_settings.ollama_api_base_url and model.startswith("ollama"):
+            api_base = self.brain_settings.ollama_api_base_url
+
+        return ChatLiteLLM(
+            temperature=temperature,
+            model=model,
+            streaming=streaming,
+            verbose=True,
+            callbacks=callbacks,
+            max_tokens=self.max_tokens,
+            api_base=api_base,
+        )
+
+    def _create_prompt_template(self):
+        messages = [
+            HumanMessagePromptTemplate.from_template("{question}"),
+        ]
+        CHAT_PROMPT = ChatPromptTemplate.from_messages(messages)
+        return CHAT_PROMPT
+
+    def generate_answer(
+        self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True
+    ) -> GetChatHistoryOutput:
+        # TODO: move format_chat_history to the chat service?
+ transformed_history = format_chat_history( + chat_service.get_chat_history(self.chat_id) + ) + prompt_content = ( + self.prompt_to_use.content if self.prompt_to_use else SYSTEM_MESSAGE + ) + + messages = format_history_to_openai_mesages( + transformed_history, prompt_content, question.question + ) + answering_llm = self._create_llm( + model=self.model, + streaming=False, + callbacks=self.callbacks, + ) + model_prediction = answering_llm.predict_messages(messages) + answer = model_prediction.content + if save_answer: + new_chat = chat_service.update_chat_history( + CreateChatHistory( + **{ + "chat_id": chat_id, + "user_message": question.question, + "assistant": answer, + "brain_id": None, + "prompt_id": self.prompt_to_use_id, + } + ) + ) + + return GetChatHistoryOutput( + **{ + "chat_id": chat_id, + "user_message": question.question, + "assistant": answer, + "message_time": new_chat.message_time, + "prompt_title": ( + self.prompt_to_use.title if self.prompt_to_use else None + ), + "brain_name": None, + "message_id": new_chat.message_id, + } + ) + else: + return GetChatHistoryOutput( + **{ + "chat_id": chat_id, + "user_message": question.question, + "assistant": answer, + "message_time": None, + "prompt_title": ( + self.prompt_to_use.title if self.prompt_to_use else None + ), + "brain_name": None, + "message_id": None, + } + ) + + async def generate_stream( + self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True + ) -> AsyncIterable: + callback = AsyncIteratorCallbackHandler() + self.callbacks = [callback] + + transformed_history = format_chat_history( + chat_service.get_chat_history(self.chat_id) + ) + prompt_content = ( + self.prompt_to_use.content if self.prompt_to_use else SYSTEM_MESSAGE + ) + + messages = format_history_to_openai_mesages( + transformed_history, prompt_content, question.question + ) + answering_llm = self._create_llm( + model=self.model, + streaming=True, + callbacks=self.callbacks, + ) + + CHAT_PROMPT = ChatPromptTemplate.from_messages(messages) + headlessChain = LLMChain(llm=answering_llm, prompt=CHAT_PROMPT) + + response_tokens = [] + + async def wrap_done(fn: Awaitable, event: asyncio.Event): + try: + await fn + except Exception as e: + logger.error(f"Caught exception: {e}") + finally: + event.set() + + run = asyncio.create_task( + wrap_done( + headlessChain.acall({}), + callback.done, + ), + ) + + if save_answer: + streamed_chat_history = chat_service.update_chat_history( + CreateChatHistory( + **{ + "chat_id": chat_id, + "user_message": question.question, + "assistant": "", + "brain_id": None, + "prompt_id": self.prompt_to_use_id, + } + ) + ) + + streamed_chat_history = GetChatHistoryOutput( + **{ + "chat_id": str(chat_id), + "message_id": streamed_chat_history.message_id, + "message_time": streamed_chat_history.message_time, + "user_message": question.question, + "assistant": "", + "prompt_title": ( + self.prompt_to_use.title if self.prompt_to_use else None + ), + "brain_name": None, + } + ) + else: + streamed_chat_history = GetChatHistoryOutput( + **{ + "chat_id": str(chat_id), + "message_id": None, + "message_time": None, + "user_message": question.question, + "assistant": "", + "prompt_title": ( + self.prompt_to_use.title if self.prompt_to_use else None + ), + "brain_name": None, + } + ) + + async for token in callback.aiter(): + response_tokens.append(token) + streamed_chat_history.assistant = token + yield f"data: {json.dumps(streamed_chat_history.dict())}" + + await run + assistant = "".join(response_tokens) + + if save_answer: + 
chat_service.update_message_by_id( + message_id=str(streamed_chat_history.message_id), + user_message=question.question, + assistant=assistant, + ) + + model_config = ConfigDict(arbitrary_types_allowed=True) diff --git a/backend/core/quivr_core/api/modules/brain/qa_interface.py b/backend/core/quivr_core/api/modules/brain/qa_interface.py new file mode 100644 index 000000000000..7eac8497582c --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/qa_interface.py @@ -0,0 +1,58 @@ +from abc import ABC, abstractmethod +from uuid import UUID + +from quivr_core.api.modules.chat.dto.chats import ChatQuestion + + +def model_compatible_with_function_calling(model: str): + return model in [ + "gpt-4o", + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-turbo-preview", + "gpt-4-0125-preview", + "gpt-4-1106-preview", + "gpt-4", + "gpt-4-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0613", + ] + + +class QAInterface(ABC): + """ + Abstract class for all QA interfaces. + This can be used to implement custom answer generation logic. + """ + + @abstractmethod + def calculate_pricing(self): + raise NotImplementedError( + "calculate_pricing is an abstract method and must be implemented" + ) + + @abstractmethod + def generate_answer( + self, + chat_id: UUID, + question: ChatQuestion, + save_answer: bool, + *custom_params: tuple, + ): + raise NotImplementedError( + "generate_answer is an abstract method and must be implemented" + ) + + @abstractmethod + async def generate_stream( + self, + chat_id: UUID, + question: ChatQuestion, + save_answer: bool, + *custom_params: tuple, + ): + raise NotImplementedError( + "generate_stream is an abstract method and must be implemented" + ) diff --git a/backend/core/quivr_core/api/modules/brain/rags/__init__.py b/backend/core/quivr_core/api/modules/brain/rags/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/brain/rags/quivr_rag.py b/backend/core/quivr_core/api/modules/brain/rags/quivr_rag.py new file mode 100644 index 000000000000..817fa13a2afd --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/rags/quivr_rag.py @@ -0,0 +1,377 @@ +import datetime +import os +from operator import itemgetter +from typing import List, Optional +from uuid import UUID + +from langchain.chains import ConversationalRetrievalChain +from langchain.llms.base import BaseLLM +from langchain.prompts import HumanMessagePromptTemplate, SystemMessagePromptTemplate +from langchain.retrievers import ContextualCompressionRetriever +from langchain.retrievers.document_compressors import FlashrankRerank +from langchain.schema import format_document +from langchain_cohere import CohereRerank +from langchain_community.chat_models import ChatLiteLLM +from langchain_core.output_parsers import StrOutputParser +from langchain_core.prompts import ChatPromptTemplate, PromptTemplate +from langchain_core.pydantic_v1 import BaseModel as BaseModelV1 +from langchain_core.pydantic_v1 import Field as FieldV1 +from langchain_core.runnables import RunnableLambda, RunnablePassthrough +from langchain_openai import ChatOpenAI +from pydantic import BaseModel, ConfigDict +from pydantic_settings import BaseSettings +from supabase.client import Client + +from quivr_core.api.logger import get_logger + +# Importing settings related to the 'brain' +from quivr_core.api.models.settings import ( + BrainSettings, + get_embedding_client, + get_supabase_client, +) +from quivr_core.api.modules.brain.qa_interface 
import (
+    model_compatible_with_function_calling,
+)
+from quivr_core.api.modules.brain.service.brain_service import BrainService
+from quivr_core.api.modules.chat.service.chat_service import ChatService
+from quivr_core.api.modules.dependencies import get_service
+from quivr_core.api.modules.knowledge.repository.knowledges import KnowledgeRepository
+from quivr_core.api.modules.prompt.service.get_prompt_to_use import get_prompt_to_use
+from quivr_core.api.vectorstore.supabase import CustomSupabaseVectorStore
+
+logger = get_logger(__name__)
+
+
+class cited_answer(BaseModelV1):
+    """Answer the user question based only on the given sources, and cite the sources used."""
+
+    thoughts: str = FieldV1(
+        ...,
+        description="""Description of the thought process, based only on the given sources.
+        Cite the text as much as possible and give the document name it appears in. In the format : 'Doc_name states : cited_text'. Be the most
+        procedural as possible. Write all the steps needed to find the answer until you find it.""",
+    )
+    answer: str = FieldV1(
+        ...,
+        description="The answer to the user question, which is based only on the given sources.",
+    )
+    citations: List[int] = FieldV1(
+        ...,
+        description="The integer IDs of the SPECIFIC sources which justify the answer.",
+    )
+    followup_questions: List[str] = FieldV1(
+        ...,
+        description="Generate up to 3 follow-up questions that could be asked based on the answer given or context provided.",
+    )
+
+
+# First step is to create the Rephrasing Prompt
+_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language. Keep as many details as possible from previous messages. Keep entity names exactly as they appear.
+
+Chat History:
+{chat_history}
+Follow Up Input: {question}
+Standalone question:"""
+CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
+
+# Next is the answering prompt
+
+template_answer = """
+Context:
+{context}
+
+User Question: {question}
+Answer:
+"""
+
+today_date = datetime.datetime.now().strftime("%B %d, %Y")
+
+system_message_template = (
+    f"Your name is Quivr. You're a helpful assistant. Today's date is {today_date}."
+)
+
+system_message_template += """
+When answering use markdown.
+Use markdown code blocks for code snippets.
+Answer in a concise and clear manner.
+Use the following pieces of context from files provided by the user to answer the user.
+Answer in the same language as the user question.
+If you don't know the answer with the context provided from the files, just say that you don't know, don't try to make up an answer.
+Don't cite the source id in the answer objects, but you can use the source to answer the question.
+You have access to the files to answer the user question (limited to first 20 files):
+{files}
+
+If not None, also follow this user instruction when answering: {custom_instructions}
+"""
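`get_chain` later in this file forces function-calling models to answer through this schema via `bind_tools(..., tool_choice="cited_answer")`. A minimal sketch of that binding in isolation, assuming `OPENAI_API_KEY` is set; the inline sources and the question are made up:

```python
from langchain_openai import ChatOpenAI

# Force the model to respond through the cited_answer tool so the output
# always carries answer / citations / thoughts / followup_questions.
llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0).bind_tools(
    [cited_answer], tool_choice="cited_answer"
)
msg = llm.invoke(
    "Source 0: The sky is blue.\nSource 1: Grass is green.\n\nWhat color is the sky?"
)
args = msg.tool_calls[-1]["args"]
print(args["answer"], args["citations"])  # e.g. "The sky is blue." [0]
```

Because the tool call is forced, every response carries all four fields, which is what the streaming code upstream relies on when it diffs successive partial tool calls.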
+
+
+ANSWER_PROMPT = ChatPromptTemplate.from_messages(
+    [
+        SystemMessagePromptTemplate.from_template(system_message_template),
+        HumanMessagePromptTemplate.from_template(template_answer),
+    ]
+)
+
+
+# How we format documents
+
+DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(
+    template="Source: {index} \n {page_content}"
+)
+
+
+def is_valid_uuid(uuid_to_test, version=4):
+    try:
+        uuid_obj = UUID(uuid_to_test, version=version)
+    except ValueError:
+        return False
+
+    return str(uuid_obj) == uuid_to_test
+
+
+brain_service = BrainService()
+chat_service = get_service(ChatService)()
+
+
+class QuivrRAG(BaseModel):
+    """
+    Quivr implementation of the RAGInterface.
+    """
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    # Instantiate settings
+    brain_settings: BaseSettings = BrainSettings()
+    # Default class attributes
+    model: str = None  # pyright: ignore reportPrivateUsage=none
+    temperature: float = 0.1
+    chat_id: str = None  # pyright: ignore reportPrivateUsage=none
+    brain_id: str = None  # pyright: ignore reportPrivateUsage=none
+    max_tokens: int = 2000  # Output length
+    max_input: int = 2000
+    streaming: bool = False
+    knowledge_service: KnowledgeRepository = None
+    supabase_client: Optional[Client] = None
+    vector_store: Optional[CustomSupabaseVectorStore] = None
+    qa: Optional[ConversationalRetrievalChain] = None
+    prompt_id: Optional[UUID] = None
+
+    def prompt_to_use(self):
+        if self.brain_id and is_valid_uuid(self.brain_id):
+            return get_prompt_to_use(UUID(self.brain_id), self.prompt_id)
+        else:
+            return None
+
+    def __init__(
+        self,
+        model: str,
+        brain_id: str,
+        chat_id: str,
+        streaming: bool = False,
+        prompt_id: Optional[UUID] = None,
+        max_tokens: int = 2000,
+        max_input: int = 2000,
+        **kwargs,
+    ):
+        super().__init__(
+            model=model,
+            brain_id=brain_id,
+            chat_id=chat_id,
+            streaming=streaming,
+            max_tokens=max_tokens,
+            max_input=max_input,
+            **kwargs,
+        )
+        self.supabase_client = get_supabase_client()
+        self.vector_store = self._create_vector_store()
+        self.prompt_id = prompt_id
+        self.max_tokens = max_tokens
+        self.max_input = max_input
+        self.model = model
+        self.brain_id = brain_id
+        self.chat_id = chat_id
+        self.streaming = streaming
+        self.knowledge_service = KnowledgeRepository()
+
+    def _create_vector_store(self) -> CustomSupabaseVectorStore:
+        embeddings = get_embedding_client()
+        return CustomSupabaseVectorStore(
+            self.supabase_client,
+            embeddings,
+            table_name="vectors",
+            brain_id=self.brain_id,
+            max_input=self.max_input,
+        )
+
+    def _create_llm(
+        self,
+        callbacks,
+        model,
+        streaming=False,
+        temperature=0,
+    ) -> BaseLLM:
+        """
+        Create an LLM with the given parameters
+        """
+        if streaming and callbacks is None:
+            raise ValueError(
+                "Callbacks must be provided when using streaming language models"
+            )
+
+        api_base = None
+        if self.brain_settings.ollama_api_base_url and model.startswith("ollama"):
+            api_base = (
+                self.brain_settings.ollama_api_base_url  # pyright: ignore reportPrivateUsage=none
+            )
+
+        return ChatLiteLLM(
+            temperature=temperature,
+            max_tokens=self.max_tokens,
+            model=model,
+            streaming=streaming,
+            verbose=False,
+            callbacks=callbacks,
+            api_base=api_base,
+        )  # pyright: ignore reportPrivateUsage=none
+
+    def _combine_documents(
+        self, docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n"
+    ):
+        # for each doc, add an index to the metadata to be able to cite the sources
+        for index, doc in enumerate(docs):
+            doc.metadata["index"] = index
+        doc_strings = [format_document(doc, document_prompt) for doc in docs]
+        return document_separator.join(doc_strings)
+
+    def get_retriever(self):
+        return self.vector_store.as_retriever()
+
+    def filter_history(
+        self, chat_history, max_history: int = 10, max_tokens: int = 2000
+    ):
+        """
+        Filter the chat history down to the most recent messages that fit the given limits.
+
+        Takes in a chat_history, e.g. [HumanMessage(content='Qui est Chloé ?'), AIMessage(content="Chloé est une salariée travaillant pour l'entreprise Quivr en tant qu'AI Engineer, sous la direction de son supérieur hiérarchique, Stanislas Girard."), HumanMessage(content='Dis moi en plus sur elle'), AIMessage(content=''), HumanMessage(content='Dis moi en plus sur elle'), AIMessage(content="Désolé, je n'ai pas d'autres informations sur Chloé à partir des fichiers fournis.")]
+        Returns a filtered chat_history, applying the max_tokens budget first, then max_history, where a human message and an AI message count as one pair.
+        A token is approximated as 4 characters.
+        """
+        chat_history = chat_history[::-1]
+        total_tokens = 0
+        total_pairs = 0
+        filtered_chat_history = []
+        for i in range(0, len(chat_history), 2):
+            if i + 1 < len(chat_history):
+                human_message = chat_history[i]
+                ai_message = chat_history[i + 1]
+                message_tokens = (
+                    len(human_message.content) + len(ai_message.content)
+                ) // 4
+                if (
+                    total_tokens + message_tokens > max_tokens
+                    or total_pairs >= max_history
+                ):
+                    break
+                filtered_chat_history.append(human_message)
+                filtered_chat_history.append(ai_message)
+                total_tokens += message_tokens
+                total_pairs += 1
+        chat_history = filtered_chat_history[::-1]
+
+        return chat_history
+
+    def get_chain(self):
+        list_files_array = self.knowledge_service.get_all_knowledge_in_brain(
+            self.brain_id
+        )  # pyright: ignore reportPrivateUsage=none
+
+        list_files_array = [file.file_name or file.url for file in list_files_array]
+        # Cap the context at the first 20 files
+        if len(list_files_array) > 20:
+            list_files_array = list_files_array[:20]
+
+        list_files = "\n".join(list_files_array) if list_files_array else "None"
+
+        # TODO(@aminediro) : Should be a class level attribute
+        compressor = None
+        if os.getenv("COHERE_API_KEY"):
+            compressor = CohereRerank(top_n=20)
+        else:
+            compressor = FlashrankRerank(model="ms-marco-TinyBERT-L-2-v2", top_n=20)
+
+        retriever_doc = self.get_retriever()
+        compression_retriever = ContextualCompressionRetriever(
+            base_compressor=compressor, base_retriever=retriever_doc
+        )
+
+        loaded_memory = RunnablePassthrough.assign(
+            chat_history=RunnableLambda(
+                lambda x: self.filter_history(x["chat_history"]),
+            ),
+            question=lambda x: x["question"],
+        )
+
+        api_base = None
+        if self.brain_settings.ollama_api_base_url and self.model.startswith("ollama"):
+            api_base = self.brain_settings.ollama_api_base_url
+
+        standalone_question = {
+            "standalone_question": {
+                "question": lambda x: x["question"],
+                "chat_history": itemgetter("chat_history"),
+            }
+            | CONDENSE_QUESTION_PROMPT
+            | ChatLiteLLM(temperature=0, model=self.model, api_base=api_base)
+            | StrOutputParser(),
+        }
+
+        prompt_custom_user = self.prompt_to_use()
+        prompt_to_use = "None"
+        if prompt_custom_user:
+            prompt_to_use = prompt_custom_user.content
+
+        # Now we retrieve the documents
+        retrieved_documents = {
+            "docs": itemgetter("standalone_question") | compression_retriever,
+            "question": lambda x: x["standalone_question"],
+            "custom_instructions": lambda x: prompt_to_use,
+        }
+
+        final_inputs = {
+            "context": lambda x: self._combine_documents(x["docs"]),
+            "question": itemgetter("question"),
+            "custom_instructions": itemgetter("custom_instructions"),
+            "files": lambda x: list_files,
+        }
+        llm = ChatLiteLLM(
+            max_tokens=self.max_tokens,
+            model=self.model,
+            temperature=self.temperature,
+            api_base=api_base,
+        )  # pyright: ignore reportPrivateUsage=none
+        if model_compatible_with_function_calling(self.model):
+            # And finally, we do the part that returns the answers
+            llm_function = ChatOpenAI(
+                max_tokens=self.max_tokens,
+                model=self.model,
+                temperature=self.temperature,
+            )
+            llm = llm_function.bind_tools(
+                [cited_answer],
+                tool_choice="cited_answer",
+            )
+
+        answer = {
+            "answer": final_inputs | ANSWER_PROMPT | llm,
+            "docs": itemgetter("docs"),
+        }
+
+        return loaded_memory | standalone_question | retrieved_documents | answer
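The returned pipeline is plain LCEL: four dict stages chained with `|`, each stage reading keys produced by the previous one. The same pattern on a toy example, runnable without any model or retriever (all names below are illustrative):

```python
from operator import itemgetter

from langchain_core.runnables import RunnableLambda, RunnablePassthrough

# Stage 1: enrich the input dict; stage 2: derive a new key; stage 3: select output keys.
load = RunnablePassthrough.assign(history=RunnableLambda(lambda x: x.get("history", [])))
condense = {
    "standalone_question": RunnableLambda(
        lambda x: f"{x['question']} (given {len(x['history'])} prior turns)"
    )
}
select = {"question": itemgetter("standalone_question")}

pipeline = load | condense | select
print(pipeline.invoke({"question": "What changed?"}))
# {'question': 'What changed? (given 0 prior turns)'}
```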
itemgetter("question"), + "custom_instructions": itemgetter("custom_instructions"), + "files": lambda x: list_files, + } + llm = ChatLiteLLM( + max_tokens=self.max_tokens, + model=self.model, + temperature=self.temperature, + api_base=api_base, + ) # pyright: ignore reportPrivateUsage=none + if model_compatible_with_function_calling(self.model): + # And finally, we do the part that returns the answers + llm_function = ChatOpenAI( + max_tokens=self.max_tokens, + model=self.model, + temperature=self.temperature, + ) + llm = llm_function.bind_tools( + [cited_answer], + tool_choice="cited_answer", + ) + + answer = { + "answer": final_inputs | ANSWER_PROMPT | llm, + "docs": itemgetter("docs"), + } + + return loaded_memory | standalone_question | retrieved_documents | answer diff --git a/backend/core/quivr_core/api/modules/brain/rags/rag_interface.py b/backend/core/quivr_core/api/modules/brain/rags/rag_interface.py new file mode 100644 index 000000000000..9364eb0dfcf3 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/rags/rag_interface.py @@ -0,0 +1,31 @@ +from abc import ABC, abstractmethod +from typing import List, Optional + +from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler +from langchain.chains.combine_documents.base import BaseCombineDocumentsChain +from langchain.chains.llm import LLMChain +from langchain_core.retrievers import BaseRetriever + + +class RAGInterface(ABC): + @abstractmethod + def get_doc_chain( + self, + streaming: bool, + callbacks: Optional[List[AsyncIteratorCallbackHandler]] = None, + ) -> BaseCombineDocumentsChain: + raise NotImplementedError( + "get_doc_chain is an abstract method and must be implemented" + ) + + @abstractmethod + def get_question_generation_llm(self) -> LLMChain: + raise NotImplementedError( + "get_question_generation_llm is an abstract method and must be implemented" + ) + + @abstractmethod + def get_retriever(self) -> BaseRetriever: + raise NotImplementedError( + "get_retriever is an abstract method and must be implemented" + ) diff --git a/backend/core/quivr_core/api/modules/brain/repository/__init__.py b/backend/core/quivr_core/api/modules/brain/repository/__init__.py new file mode 100644 index 000000000000..46e80777797f --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/repository/__init__.py @@ -0,0 +1,4 @@ +from .brains import Brains +from .brains_users import BrainsUsers +from .brains_vectors import BrainsVectors +from .integration_brains import IntegrationBrain, IntegrationDescription diff --git a/backend/core/quivr_core/api/modules/brain/repository/api_brain_definitions.py b/backend/core/quivr_core/api/modules/brain/repository/api_brain_definitions.py new file mode 100644 index 000000000000..6b4c49f8758d --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/repository/api_brain_definitions.py @@ -0,0 +1,60 @@ +from typing import Optional +from uuid import UUID + +from quivr_core.api.models.settings import get_supabase_client +from quivr_core.api.modules.brain.dto.inputs import CreateApiBrainDefinition +from quivr_core.api.modules.brain.entity.api_brain_definition_entity import ( + ApiBrainDefinitionEntity, +) +from quivr_core.api.modules.brain.repository.interfaces import ( + ApiBrainDefinitionsInterface, +) + + +class ApiBrainDefinitions(ApiBrainDefinitionsInterface): + def __init__(self): + self.db = get_supabase_client() + + def get_api_brain_definition( + self, brain_id: UUID + ) -> Optional[ApiBrainDefinitionEntity]: + response = ( + self.db.table("api_brain_definition") + 
.select("*") + .filter("brain_id", "eq", brain_id) + .execute() + ) + if len(response.data) == 0: + return None + + return ApiBrainDefinitionEntity(**response.data[0]) + + def add_api_brain_definition( + self, brain_id: UUID, api_brain_definition: CreateApiBrainDefinition + ) -> Optional[ApiBrainDefinitionEntity]: + response = ( + self.db.table("api_brain_definition") + .insert([{"brain_id": str(brain_id), **api_brain_definition.dict()}]) + .execute() + ) + if len(response.data) == 0: + return None + return ApiBrainDefinitionEntity(**response.data[0]) + + def update_api_brain_definition( + self, brain_id: UUID, api_brain_definition: ApiBrainDefinitionEntity + ) -> Optional[ApiBrainDefinitionEntity]: + response = ( + self.db.table("api_brain_definition") + .update(api_brain_definition.dict(exclude={"brain_id"})) + .filter("brain_id", "eq", str(brain_id)) + .execute() + ) + if len(response.data) == 0: + return None + return ApiBrainDefinitionEntity(**response.data[0]) + + def delete_api_brain_definition(self, brain_id: UUID) -> None: + self.db.table("api_brain_definition").delete().filter( + "brain_id", "eq", str(brain_id) + ).execute() diff --git a/backend/core/quivr_core/api/modules/brain/repository/brains.py b/backend/core/quivr_core/api/modules/brain/repository/brains.py new file mode 100644 index 000000000000..ce3c844ee0b0 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/repository/brains.py @@ -0,0 +1,127 @@ +from uuid import UUID + +from sqlalchemy import text + +from quivr_core.api.logger import get_logger +from quivr_core.api.models.settings import ( + get_embedding_client, + get_pg_database_engine, + get_supabase_client, +) +from quivr_core.api.modules.brain.dto.inputs import BrainUpdatableProperties +from quivr_core.api.modules.brain.entity.brain_entity import BrainEntity, PublicBrain +from quivr_core.api.modules.brain.repository.interfaces.brains_interface import ( + BrainsInterface, +) + +logger = get_logger(__name__) + + +class Brains(BrainsInterface): + def __init__(self): + supabase_client = get_supabase_client() + self.db = supabase_client + pg_engine = get_pg_database_engine() + self.pg_engine = pg_engine + + def create_brain(self, brain): + embeddings = get_embedding_client() + string_to_embed = f"Name: {brain.name} Description: {brain.description}" + brain_meaning = embeddings.embed_query(string_to_embed) + brain_dict = brain.dict( + exclude={ + "brain_definition", + "brain_secrets_values", + "connected_brains_ids", + "integration", + } + ) + brain_dict["meaning"] = brain_meaning + response = (self.db.table("brains").insert(brain_dict)).execute() + + return BrainEntity(**response.data[0]) + + def get_public_brains(self): + response = ( + self.db.from_("brains") + .select( + "id:brain_id, name, description, last_update, brain_type, brain_definition: api_brain_definition(*), number_of_subscribers:brains_users(count)" + ) + .filter("status", "eq", "public") + .execute() + ) + public_brains: list[PublicBrain] = [] + + for item in response.data: + item["number_of_subscribers"] = item["number_of_subscribers"][0]["count"] + if not item["brain_definition"]: + del item["brain_definition"] + else: + item["brain_definition"]["secrets"] = [] + + public_brains.append(PublicBrain(**item)) + return public_brains + + def update_brain_last_update_time(self, brain_id): + try: + with self.pg_engine.begin() as connection: + query = """ + UPDATE brains + SET last_update = now() + WHERE brain_id = '{brain_id}' + """ + connection.execute(text(query.format(brain_id=brain_id))) + 
+        except Exception as e:
+            logger.error(e)
+
+    def get_brain_details(self, brain_id):
+        with self.pg_engine.begin() as connection:
+            query = """
+            SELECT * FROM brains
+            WHERE brain_id = :brain_id
+            """
+            response = connection.execute(
+                text(query), {"brain_id": str(brain_id)}
+            ).fetchall()
+        if len(response) == 0:
+            return None
+        return BrainEntity(**response[0]._mapping)
+
+    def delete_brain(self, brain_id: str):
+        with self.pg_engine.begin() as connection:
+            results = connection.execute(
+                text("DELETE FROM brains WHERE brain_id = :brain_id"),
+                {"brain_id": str(brain_id)},
+            )
+
+        return results
+
+    def update_brain_by_id(
+        self, brain_id: UUID, brain: BrainUpdatableProperties
+    ) -> BrainEntity | None:
+        embeddings = get_embedding_client()
+        string_to_embed = f"Name: {brain.name} Description: {brain.description}"
+        brain_meaning = embeddings.embed_query(string_to_embed)
+        brain_dict = brain.dict(exclude_unset=True)
+        brain_dict["meaning"] = brain_meaning
+        update_brain_response = (
+            self.db.table("brains")
+            .update(brain_dict)
+            .match({"brain_id": brain_id})
+            .execute()
+        ).data
+
+        if len(update_brain_response) == 0:
+            return None
+
+        return BrainEntity(**update_brain_response[0])
+
+    def get_brain_by_id(self, brain_id: UUID) -> BrainEntity | None:
+        # TODO: merge this method with get_brain_details
+        with self.pg_engine.begin() as connection:
+            response = connection.execute(
+                text("SELECT * FROM brains WHERE brain_id = :brain_id"),
+                {"brain_id": str(brain_id)},
+            ).fetchall()
+
+        if len(response) == 0:
+            return None
+        return BrainEntity(**response[0]._mapping)
diff --git a/backend/core/quivr_core/api/modules/brain/repository/brains_users.py b/backend/core/quivr_core/api/modules/brain/repository/brains_users.py
new file mode 100644
index 000000000000..07410f79a4e4
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/brain/repository/brains_users.py
@@ -0,0 +1,207 @@
+from uuid import UUID
+
+from quivr_core.api.logger import get_logger
+from quivr_core.api.models.settings import get_embedding_client, get_supabase_client
+from quivr_core.api.modules.brain.entity.brain_entity import (
+    BrainUser,
+    MinimalUserBrainEntity,
+)
+from quivr_core.api.modules.brain.repository.interfaces.brains_users_interface import (
+    BrainsUsersInterface,
+)
+
+logger = get_logger(__name__)
+
+
+class BrainsUsers(BrainsUsersInterface):
+    def __init__(self):
+        supabase_client = get_supabase_client()
+        self.db = supabase_client
+
+    def update_meaning(self, brain: MinimalUserBrainEntity):
+        embeddings = get_embedding_client()
+        string_to_embed = f"Name: {brain.name} Description: {brain.description}"
+        brain_meaning = embeddings.embed_query(string_to_embed)
+        brain_dict = {"meaning": brain_meaning}
+        response = (
+            self.db.table("brains")
+            .update(brain_dict)
+            .match({"brain_id": brain.id})
+            .execute()
+        ).data
+
+        if len(response) == 0:
+            return False
+
+        return True
+
+    def get_user_brains(self, user_id) -> list[MinimalUserBrainEntity]:
+        response = (
+            self.db.from_("brains_users")
+            .select(
+                "id:brain_id, rights, brains (brain_id, name, status, brain_type, description, meaning, integrations_user (brain_id, integration_id, integrations (id, integration_name, integration_logo_url, max_files)))"
+            )
+            .filter("user_id", "eq", user_id)
+            .execute()
+        )
+        user_brains: list[MinimalUserBrainEntity] = []
+        for item in response.data:
+            integration_logo_url = ""
+            max_files = 5000
+            if item["brains"]["brain_type"] == "integration":
+                if "integrations_user" in item["brains"]:
+                    for integration_user in item["brains"]["integrations_user"]:
+                        if
"integrations" in integration_user: + integration_logo_url = integration_user["integrations"][ + "integration_logo_url" + ] + max_files = integration_user["integrations"]["max_files"] + + user_brains.append( + MinimalUserBrainEntity( + id=item["brains"]["brain_id"], + name=item["brains"]["name"], + rights=item["rights"], + status=item["brains"]["status"], + brain_type=item["brains"]["brain_type"], + description=( + item["brains"]["description"] + if item["brains"]["description"] is not None + else "" + ), + integration_logo_url=str(integration_logo_url), + max_files=max_files, + ) + ) + user_brains[-1].rights = item["rights"] + if item["brains"]["meaning"] is None: + self.update_meaning(user_brains[-1]) + + return user_brains + + def get_brain_for_user(self, user_id, brain_id): + response = ( + self.db.from_("brains_users") + .select( + "id:brain_id, rights, brains (id: brain_id, status, name, brain_type, description)" + ) + .filter("user_id", "eq", user_id) + .filter("brain_id", "eq", brain_id) + .execute() + ) + if len(response.data) == 0: + return None + brain_data = response.data[0] + + return MinimalUserBrainEntity( + id=brain_data["brains"]["id"], + name=brain_data["brains"]["name"], + rights=brain_data["rights"], + status=brain_data["brains"]["status"], + brain_type=brain_data["brains"]["brain_type"], + description=( + brain_data["brains"]["description"] + if brain_data["brains"]["description"] is not None + else "" + ), + integration_logo_url="", + max_files=100, + ) + + def delete_brain_user_by_id( + self, + user_id: UUID, + brain_id: UUID, + ): + results = ( + self.db.table("brains_users") + .delete() + .match({"brain_id": str(brain_id), "user_id": str(user_id)}) + .execute() + ) + return results.data + + def delete_brain_users(self, brain_id: str): + results = ( + self.db.table("brains_users") + .delete() + .match({"brain_id": brain_id}) + .execute() + ) + + return results + + def create_brain_user(self, user_id: UUID, brain_id, rights, default_brain: bool): + response = ( + self.db.table("brains_users") + .insert( + { + "brain_id": str(brain_id), + "user_id": str(user_id), + "rights": rights, + "default_brain": default_brain, + } + ) + .execute() + ) + return response + + def get_user_default_brain_id(self, user_id: UUID) -> UUID | None: + response = ( + self.db.from_("brains_users") + .select("brain_id") + .filter("user_id", "eq", user_id) + .filter("default_brain", "eq", True) + .execute() + ).data + if len(response) == 0: + return None + return UUID(response[0].get("brain_id")) + + def get_brain_users(self, brain_id: UUID) -> list[BrainUser]: + response = ( + self.db.table("brains_users") + .select("id:brain_id, *") + .filter("brain_id", "eq", str(brain_id)) + .execute() + ) + + return [BrainUser(**item) for item in response.data] + + def delete_brain_subscribers(self, brain_id: UUID): + results = ( + self.db.table("brains_users") + .delete() + .match({"brain_id": str(brain_id)}) + .match({"rights": "Viewer"}) + .execute() + ).data + + return results + + def get_brain_subscribers_count(self, brain_id: UUID) -> int: + response = ( + self.db.from_("brains_users") + .select( + "count", + ) + .filter("brain_id", "eq", str(brain_id)) + .execute() + ).data + if len(response) == 0: + raise ValueError(f"Brain with id {brain_id} does not exist.") + return response[0]["count"] + + def update_brain_user_default_status( + self, user_id: UUID, brain_id: UUID, default_brain: bool + ): + self.db.table("brains_users").update({"default_brain": default_brain}).match( + {"brain_id": brain_id, 
"user_id": user_id} + ).execute() + + def update_brain_user_rights( + self, user_id: UUID, brain_id: UUID, rights: str + ) -> None: + self.db.table("brains_users").update({"rights": rights}).match( + {"brain_id": brain_id, "user_id": user_id} + ).execute() diff --git a/backend/core/quivr_core/api/modules/brain/repository/brains_vectors.py b/backend/core/quivr_core/api/modules/brain/repository/brains_vectors.py new file mode 100644 index 000000000000..2e6ed26ee7f0 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/repository/brains_vectors.py @@ -0,0 +1,105 @@ +from quivr_core.api.logger import get_logger +from quivr_core.api.models.settings import get_supabase_client +from quivr_core.api.modules.brain.repository.interfaces.brains_vectors_interface import ( + BrainsVectorsInterface, +) + +logger = get_logger(__name__) + + +class BrainsVectors(BrainsVectorsInterface): + def __init__(self): + supabase_client = get_supabase_client() + self.db = supabase_client + + def create_brain_vector(self, brain_id, vector_id, file_sha1): + response = ( + self.db.table("brains_vectors") + .insert( + { + "brain_id": str(brain_id), + "vector_id": str(vector_id), + "file_sha1": file_sha1, + } + ) + .execute() + ) + return response.data + + def get_vector_ids_from_file_sha1(self, file_sha1: str): + # move to vectors class + vectorsResponse = ( + self.db.table("vectors") + .select("id") + .filter("file_sha1", "eq", file_sha1) + .execute() + ) + return vectorsResponse.data + + def get_brain_vector_ids(self, brain_id): + """ + Retrieve unique brain data (i.e. uploaded files and crawled websites). + """ + + response = ( + self.db.from_("brains_vectors") + .select("vector_id") + .filter("brain_id", "eq", brain_id) + .execute() + ) + + vector_ids = [item["vector_id"] for item in response.data] + + if len(vector_ids) == 0: + return [] + + return vector_ids + + def delete_file_from_brain(self, brain_id, file_name: str): + # First, get the vector_ids associated with the file_name + # TODO: filter by brain_id + file_vectors = ( + self.db.table("vectors") + .select("id") + .filter("metadata->>file_name", "eq", file_name) + .execute() + ) + + file_vectors_ids = [item["id"] for item in file_vectors.data] + + # remove current file vectors from brain vectors + self.db.table("brains_vectors").delete().filter( + "vector_id", "in", f"({','.join(map(str, file_vectors_ids))})" + ).filter("brain_id", "eq", brain_id).execute() + + vectors_used_by_another_brain = ( + self.db.table("brains_vectors") + .select("vector_id") + .filter("vector_id", "in", f"({','.join(map(str, file_vectors_ids))})") + .filter("brain_id", "neq", brain_id) + .execute() + ) + + vectors_used_by_another_brain_ids = [ + item["vector_id"] for item in vectors_used_by_another_brain.data + ] + + vectors_no_longer_used_ids = [ + id for id in file_vectors_ids if id not in vectors_used_by_another_brain_ids + ] + + self.db.table("vectors").delete().filter( + "id", "in", f"({','.join(map(str, vectors_no_longer_used_ids))})" + ).execute() + + return {"message": f"File {file_name} in brain {brain_id} has been deleted."} + + def delete_brain_vector(self, brain_id: str): + results = ( + self.db.table("brains_vectors") + .delete() + .match({"brain_id": brain_id}) + .execute() + ) + + return results diff --git a/backend/core/quivr_core/api/modules/brain/repository/composite_brains_connections.py b/backend/core/quivr_core/api/modules/brain/repository/composite_brains_connections.py new file mode 100644 index 000000000000..38d9515e54ea --- /dev/null +++ 
b/backend/core/quivr_core/api/modules/brain/repository/composite_brains_connections.py @@ -0,0 +1,63 @@ +from uuid import UUID + +from quivr_core.api.logger import get_logger +from quivr_core.api.models.settings import get_supabase_client +from quivr_core.api.modules.brain.entity.composite_brain_connection_entity import ( + CompositeBrainConnectionEntity, +) +from quivr_core.api.modules.brain.repository.interfaces import ( + CompositeBrainsConnectionsInterface, +) + +logger = get_logger(__name__) + + +class CompositeBrainsConnections(CompositeBrainsConnectionsInterface): + def __init__(self): + self.db = get_supabase_client() + + def connect_brain( + self, composite_brain_id: UUID, connected_brain_id: UUID + ) -> CompositeBrainConnectionEntity: + response = ( + self.db.table("composite_brain_connections") + .insert( + { + "composite_brain_id": str(composite_brain_id), + "connected_brain_id": str(connected_brain_id), + } + ) + .execute() + ) + + return CompositeBrainConnectionEntity(**response.data[0]) + + def get_connected_brains(self, composite_brain_id: UUID) -> list[UUID]: + response = ( + self.db.from_("composite_brain_connections") + .select("connected_brain_id") + .filter("composite_brain_id", "eq", str(composite_brain_id)) + .execute() + ) + + return [item["connected_brain_id"] for item in response.data] + + def disconnect_brain( + self, composite_brain_id: UUID, connected_brain_id: UUID + ) -> None: + self.db.table("composite_brain_connections").delete().match( + { + "composite_brain_id": composite_brain_id, + "connected_brain_id": connected_brain_id, + } + ).execute() + + def is_connected_brain(self, brain_id: UUID) -> bool: + response = ( + self.db.from_("composite_brain_connections") + .select("connected_brain_id") + .filter("connected_brain_id", "eq", str(brain_id)) + .execute() + ) + + return len(response.data) > 0 diff --git a/backend/core/quivr_core/api/modules/brain/repository/external_api_secrets.py b/backend/core/quivr_core/api/modules/brain/repository/external_api_secrets.py new file mode 100644 index 000000000000..1eb4fa4feab4 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/repository/external_api_secrets.py @@ -0,0 +1,60 @@ +from uuid import UUID + +from quivr_core.api.models.settings import get_supabase_client +from quivr_core.api.modules.brain.repository.interfaces.external_api_secrets_interface import ( + ExternalApiSecretsInterface, +) + + +def build_secret_unique_name(user_id: UUID, brain_id: UUID, secret_name: str): + return f"{user_id}-{brain_id}-{secret_name}" + + +class ExternalApiSecrets(ExternalApiSecretsInterface): + def __init__(self): + supabase_client = get_supabase_client() + self.db = supabase_client + + def create_secret( + self, user_id: UUID, brain_id: UUID, secret_name: str, secret_value + ) -> UUID | None: + response = self.db.rpc( + "insert_secret", + { + "name": build_secret_unique_name( + user_id=user_id, brain_id=brain_id, secret_name=secret_name + ), + "secret": secret_value, + }, + ).execute() + + return response.data + + def read_secret( + self, + user_id: UUID, + brain_id: UUID, + secret_name: str, + ) -> UUID | None: + response = self.db.rpc( + "read_secret", + { + "secret_name": build_secret_unique_name( + user_id=user_id, brain_id=brain_id, secret_name=secret_name + ), + }, + ).execute() + + return response.data + + def delete_secret(self, user_id: UUID, brain_id: UUID, secret_name: str) -> bool: + response = self.db.rpc( + "delete_secret", + { + "secret_name": build_secret_unique_name( + user_id=user_id, 
brain_id=brain_id, secret_name=secret_name + ), + }, + ).execute() + + return response.data diff --git a/backend/core/quivr_core/api/modules/brain/repository/integration_brains.py b/backend/core/quivr_core/api/modules/brain/repository/integration_brains.py new file mode 100644 index 000000000000..0192a3e3547e --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/repository/integration_brains.py @@ -0,0 +1,148 @@ +from abc import ABC, abstractmethod +from typing import List + +from quivr_core.api.models.settings import get_supabase_client +from quivr_core.api.modules.brain.entity.integration_brain import ( + IntegrationDescriptionEntity, + IntegrationEntity, +) +from quivr_core.api.modules.brain.repository.interfaces.integration_brains_interface import ( + IntegrationBrainInterface, + IntegrationDescriptionInterface, +) + + +class Integration(ABC): + @abstractmethod + def load(self): + pass + + @abstractmethod + def poll(self): + pass + + +class IntegrationBrain(IntegrationBrainInterface): + """Supabase-backed repository for integration brains, i.e. the rows of the integrations_user table.""" + + def __init__(self): + self.db = get_supabase_client() + + def get_integration_brain(self, brain_id, user_id=None): + query = ( + self.db.table("integrations_user") + .select("*") + .filter("brain_id", "eq", brain_id) + ) + + if user_id: + query = query.filter("user_id", "eq", user_id) + + response = query.execute() + + if len(response.data) == 0: + return None + + return IntegrationEntity(**response.data[0]) + + def update_last_synced(self, brain_id, user_id): + response = ( + self.db.table("integrations_user") + .update({"last_synced": "now()"}) + .filter("brain_id", "eq", str(brain_id)) + .filter("user_id", "eq", str(user_id)) + .execute() + ) + if len(response.data) == 0: + return None + return IntegrationEntity(**response.data[0]) + + def add_integration_brain(self, brain_id, user_id, integration_id, settings): + response = ( + self.db.table("integrations_user") + .insert( + [ + { + "brain_id": str(brain_id), + "user_id": str(user_id), + "integration_id": str(integration_id), + "settings": settings, + } + ] + ) + .execute() + ) + if len(response.data) == 0: + return None + return IntegrationEntity(**response.data[0]) + + def update_integration_brain(self, brain_id, user_id, integration_brain): + response = ( + self.db.table("integrations_user") + .update(integration_brain.dict(exclude={"brain_id", "user_id"})) + .filter("brain_id", "eq", str(brain_id)) + .filter("user_id", "eq", str(user_id)) + .execute() + ) + if len(response.data) == 0: + return None + return IntegrationEntity(**response.data[0]) + + def delete_integration_brain(self, brain_id, user_id): + self.db.table("integrations_user").delete().filter( + "brain_id", "eq", str(brain_id) + ).filter("user_id", "eq", str(user_id)).execute() + return None + + def get_integration_brain_by_type_integration( + self, integration_name + ) -> List[IntegrationEntity] | None: + response = ( + self.db.table("integrations_user") + .select("*, integrations ()") + .filter("integrations.integration_name", "eq", integration_name) + .execute() + ) + if len(response.data) == 0: + return None + + return [IntegrationEntity(**data) for data in response.data] + + +class IntegrationDescription(IntegrationDescriptionInterface): + def __init__(self): + self.db = get_supabase_client() + + def get_integration_description(self, integration_id): + response = ( + self.db.table("integrations") + .select("*") + .filter("id", "eq", integration_id)
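+ # "id" here is the primary key of the integrations catalog table,
+ # not a brain_id or an integrations_user row id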
+ .execute() + ) + if len(response.data) == 0: + return None + + return IntegrationDescriptionEntity(**response.data[0]) + + def get_integration_description_by_user_brain_id(self, brain_id, user_id): + response = ( + self.db.table("integrations_user") + .select("*") + .filter("brain_id", "eq", brain_id) + .filter("user_id", "eq", user_id) + .execute() + ) + if len(response.data) == 0: + return None + + integration_id = response.data[0]["integration_id"] + return self.get_integration_description(integration_id) + + def get_all_integration_descriptions(self): + response = self.db.table("integrations").select("*").execute() + return [IntegrationDescriptionEntity(**data) for data in response.data] diff --git a/backend/core/quivr_core/api/modules/brain/repository/interfaces/__init__.py b/backend/core/quivr_core/api/modules/brain/repository/interfaces/__init__.py new file mode 100644 index 000000000000..7e38450bcc4c --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/repository/interfaces/__init__.py @@ -0,0 +1,10 @@ +from .api_brain_definitions_interface import ApiBrainDefinitionsInterface +from .brains_interface import BrainsInterface +from .brains_users_interface import BrainsUsersInterface +from .brains_vectors_interface import BrainsVectorsInterface +from .composite_brains_connections_interface import CompositeBrainsConnectionsInterface +from .external_api_secrets_interface import ExternalApiSecretsInterface +from .integration_brains_interface import ( + IntegrationBrainInterface, + IntegrationDescriptionInterface, +) diff --git a/backend/core/quivr_core/api/modules/brain/repository/interfaces/api_brain_definitions_interface.py b/backend/core/quivr_core/api/modules/brain/repository/interfaces/api_brain_definitions_interface.py new file mode 100644 index 000000000000..73fc6591d871 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/repository/interfaces/api_brain_definitions_interface.py @@ -0,0 +1,38 @@ +from abc import ABC, abstractmethod +from typing import Optional +from uuid import UUID + +from quivr_core.api.modules.brain.dto.inputs import CreateApiBrainDefinition +from quivr_core.api.modules.brain.entity.api_brain_definition_entity import ( + ApiBrainDefinitionEntity, +) + + +class ApiBrainDefinitionsInterface(ABC): + @abstractmethod + def get_api_brain_definition( + self, brain_id: UUID + ) -> Optional[ApiBrainDefinitionEntity]: + """ + Get the API brain definition for a brain + """ + pass + + @abstractmethod + def add_api_brain_definition( + self, brain_id: UUID, api_brain_definition: CreateApiBrainDefinition + ) -> Optional[ApiBrainDefinitionEntity]: + """ + Create an API brain definition for a brain + """ + pass + + @abstractmethod + def update_api_brain_definition( + self, brain_id: UUID, api_brain_definition: ApiBrainDefinitionEntity + ) -> Optional[ApiBrainDefinitionEntity]: + """ + Update an existing API brain definition + """ + pass + + @abstractmethod + def delete_api_brain_definition(self, brain_id: UUID) -> None: + """ + Delete the API brain definition of a brain + """ + pass diff --git a/backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_interface.py b/backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_interface.py new file mode 100644 index 000000000000..e2a7920266df --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_interface.py @@ -0,0 +1,61 @@ +from abc import ABC, abstractmethod +from uuid import UUID + +from quivr_core.api.modules.brain.dto.inputs import ( + BrainUpdatableProperties, + CreateBrainProperties, +) +from quivr_core.api.modules.brain.entity.brain_entity import BrainEntity, PublicBrain
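+# A minimal usage sketch (illustrative only, not part of the patch): any
+# concrete repository can stand in wherever this interface is declared,
+# e.g. the Supabase-backed `Brains` class added elsewhere in this patch:
+#
+#     repository: BrainsInterface = Brains()
+#     brain = repository.create_brain(CreateBrainProperties())
+#     assert repository.get_brain_by_id(brain.brain_id) is not None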
+ + +class BrainsInterface(ABC): + @abstractmethod + def create_brain(self, brain: CreateBrainProperties) -> BrainEntity: + """ + Create a brain in the brains table + """ + pass + + @abstractmethod + def get_public_brains(self) -> list[PublicBrain]: + """ + Get all public brains + """ + pass + + @abstractmethod + def get_brain_details(self, brain_id: UUID, user_id: UUID) -> BrainEntity | None: + """ + Get the details of a brain for a given user + """ + pass + + @abstractmethod + def update_brain_last_update_time(self, brain_id: UUID) -> None: + """ + Update the last update time of the brain + """ + pass + + @abstractmethod + def delete_brain(self, brain_id: UUID): + """ + Delete a brain + """ + pass + + @abstractmethod + def update_brain_by_id( + self, brain_id: UUID, brain: BrainUpdatableProperties + ) -> BrainEntity | None: + """ + Update a brain by id + """ + pass + + @abstractmethod + def get_brain_by_id(self, brain_id: UUID) -> BrainEntity | None: + """ + Get a brain by id + """ + pass diff --git a/backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_users_interface.py b/backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_users_interface.py new file mode 100644 index 000000000000..fe3288c2055a --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_users_interface.py @@ -0,0 +1,95 @@ +from abc import ABC, abstractmethod +from typing import List +from uuid import UUID + +from quivr_core.api.modules.brain.entity.brain_entity import ( + BrainUser, + MinimalUserBrainEntity, +) + + +class BrainsUsersInterface(ABC): + @abstractmethod + def get_user_brains(self, user_id) -> list[MinimalUserBrainEntity]: + """ + Get all brains for a user + """ + pass + + @abstractmethod + def get_brain_for_user(self, user_id, brain_id) -> MinimalUserBrainEntity | None: + """ + Get a brain for a user + """ + pass + + @abstractmethod + def delete_brain_user_by_id( + self, + user_id: UUID, + brain_id: UUID, + ): + """ + Delete a user in a brain + """ + pass + + @abstractmethod + def delete_brain_users(self, brain_id: str): + """ + Delete all users for a brain + """ + pass + + @abstractmethod + def create_brain_user(self, user_id: UUID, brain_id, rights, default_brain: bool): + """ + Create a brain user + """ + pass + + @abstractmethod + def get_user_default_brain_id(self, user_id: UUID) -> UUID | None: + """ + Get the default brain id for a user + """ + pass + + @abstractmethod + def get_brain_users(self, brain_id: UUID) -> List[BrainUser]: + """ + Get all users for a brain + """ + pass + + @abstractmethod + def delete_brain_subscribers(self, brain_id: UUID): + """ + Delete all subscribers for a brain with Viewer rights + """ + pass + + @abstractmethod + def get_brain_subscribers_count(self, brain_id: UUID) -> int: + """ + Get the number of subscribers for a brain + """ + pass + + @abstractmethod + def update_brain_user_default_status( + self, user_id: UUID, brain_id: UUID, default_brain: bool + ): + """ + Update the default brain status for a user + """ + pass + + @abstractmethod + def update_brain_user_rights( + self, brain_id: UUID, user_id: UUID, rights: str + ) -> None: + """ + Update the rights for a user in a brain + """ + pass diff --git a/backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_vectors_interface.py b/backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_vectors_interface.py new file mode 100644 index 000000000000..35d0e2729a45 --- /dev/null +++
b/backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_vectors_interface.py @@ -0,0 +1,41 @@ +from abc import ABC, abstractmethod +from typing import List +from uuid import UUID + + +# TODO: Replace BrainsVectors with KnowledgeVectors interface instead +class BrainsVectorsInterface(ABC): + @abstractmethod + def create_brain_vector(self, brain_id, vector_id, file_sha1): + """ + Create a brain vector + """ + pass + + @abstractmethod + def get_vector_ids_from_file_sha1(self, file_sha1: str): + """ + Get vector ids from file sha1 + """ + pass + + @abstractmethod + def get_brain_vector_ids(self, brain_id) -> List[UUID]: + """ + Get brain vector ids + """ + pass + + @abstractmethod + def delete_file_from_brain(self, brain_id, file_name: str): + """ + Delete file from brain + """ + pass + + @abstractmethod + def delete_brain_vector(self, brain_id: str): + """ + Delete brain vector + """ + pass diff --git a/backend/core/quivr_core/api/modules/brain/repository/interfaces/composite_brains_connections_interface.py b/backend/core/quivr_core/api/modules/brain/repository/interfaces/composite_brains_connections_interface.py new file mode 100644 index 000000000000..170c20889ed4 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/repository/interfaces/composite_brains_connections_interface.py @@ -0,0 +1,40 @@ +from abc import ABC, abstractmethod +from uuid import UUID + +from quivr_core.api.modules.brain.entity.composite_brain_connection_entity import ( + CompositeBrainConnectionEntity, +) + + +class CompositeBrainsConnectionsInterface(ABC): + @abstractmethod + def connect_brain( + self, composite_brain_id: UUID, connected_brain_id: UUID + ) -> CompositeBrainConnectionEntity: + """ + Connect a brain to a composite brain in the composite_brain_connections table + """ + pass + + @abstractmethod + def get_connected_brains(self, composite_brain_id: UUID) -> list[UUID]: + """ + Get all brains connected to a composite brain + """ + pass + + @abstractmethod + def disconnect_brain( + self, composite_brain_id: UUID, connected_brain_id: UUID + ) -> None: + """ + Disconnect a brain from a composite brain + """ + pass + + @abstractmethod + def is_connected_brain(self, brain_id: UUID) -> bool: + """ + Check if a brain is connected to any composite brain + """ + pass diff --git a/backend/core/quivr_core/api/modules/brain/repository/interfaces/external_api_secrets_interface.py b/backend/core/quivr_core/api/modules/brain/repository/interfaces/external_api_secrets_interface.py new file mode 100644 index 000000000000..b2f2439d1634 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/repository/interfaces/external_api_secrets_interface.py @@ -0,0 +1,29 @@ +from abc import ABC, abstractmethod +from uuid import UUID + + +class ExternalApiSecretsInterface(ABC): + @abstractmethod + def create_secret( + self, user_id: UUID, brain_id: UUID, secret_name: str, secret_value + ) -> UUID | None: + """ + Create a new secret for the API Request in given brain + """ + pass + + @abstractmethod + def read_secret( + self, user_id: UUID, brain_id: UUID, secret_name: str + ) -> UUID | None: + """ + Read a secret for the API Request in given brain + """ + pass + + @abstractmethod + def delete_secret(self, user_id: UUID, brain_id: UUID, secret_name: str) -> bool: + """ + Delete a secret from a brain + """ + pass diff --git a/backend/core/quivr_core/api/modules/brain/repository/interfaces/integration_brains_interface.py 
b/backend/core/quivr_core/api/modules/brain/repository/interfaces/integration_brains_interface.py new file mode 100644 index 000000000000..725368b25b24 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/repository/interfaces/integration_brains_interface.py @@ -0,0 +1,63 @@ +from abc import ABC, abstractmethod +from uuid import UUID + +from quivr_core.api.modules.brain.entity.integration_brain import ( + IntegrationDescriptionEntity, + IntegrationEntity, +) + + +class IntegrationBrainInterface(ABC): + @abstractmethod + def get_integration_brain(self, brain_id: UUID) -> IntegrationEntity: + """Get the integration brain entity + + Args: + brain_id (UUID): ID of the brain + + Returns: + IntegrationEntity: Integration brain entity + """ + pass + + @abstractmethod + def add_integration_brain( + self, brain_id: UUID, integration_brain: IntegrationEntity + ) -> IntegrationEntity: + pass + + @abstractmethod + def update_integration_brain( + self, brain_id: UUID, integration_brain: IntegrationEntity + ) -> IntegrationEntity: + pass + + @abstractmethod + def delete_integration_brain(self, brain_id: UUID) -> None: + pass + + +class IntegrationDescriptionInterface(ABC): + @abstractmethod + def get_integration_description( + self, integration_id: UUID + ) -> IntegrationDescriptionEntity: + """Get the integration description entity + + Args: + integration_id (UUID): ID of the integration + + Returns: + IntegrationEntity: Integration description entity + """ + pass + + @abstractmethod + def get_all_integration_descriptions(self) -> list[IntegrationDescriptionEntity]: + pass + + @abstractmethod + def get_integration_description_by_user_brain_id( + self, brain_id: UUID, user_id: UUID + ) -> IntegrationDescriptionEntity: + pass diff --git a/backend/core/quivr_core/api/modules/brain/service/__init__.py b/backend/core/quivr_core/api/modules/brain/service/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/brain/service/api_brain_definition_service.py b/backend/core/quivr_core/api/modules/brain/service/api_brain_definition_service.py new file mode 100644 index 000000000000..80c3f8f351d1 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/service/api_brain_definition_service.py @@ -0,0 +1,40 @@ +from typing import Optional +from uuid import UUID + +from quivr_core.api.modules.brain.dto.inputs import CreateApiBrainDefinition +from quivr_core.api.modules.brain.entity.api_brain_definition_entity import ( + ApiBrainDefinitionEntity, +) +from quivr_core.api.modules.brain.repository.api_brain_definitions import ( + ApiBrainDefinitions, +) +from quivr_core.api.modules.brain.repository.interfaces import ( + ApiBrainDefinitionsInterface, +) + + +class ApiBrainDefinitionService: + repository: ApiBrainDefinitionsInterface + + def __init__(self): + self.repository = ApiBrainDefinitions() + + def add_api_brain_definition( + self, brain_id: UUID, api_brain_definition: CreateApiBrainDefinition + ) -> None: + self.repository.add_api_brain_definition(brain_id, api_brain_definition) + + def delete_api_brain_definition(self, brain_id: UUID) -> None: + self.repository.delete_api_brain_definition(brain_id) + + def get_api_brain_definition( + self, brain_id: UUID + ) -> Optional[ApiBrainDefinitionEntity]: + return self.repository.get_api_brain_definition(brain_id) + + def update_api_brain_definition( + self, brain_id: UUID, api_brain_definition: ApiBrainDefinitionEntity + ) -> Optional[ApiBrainDefinitionEntity]: + return 
self.repository.update_api_brain_definition( + brain_id, api_brain_definition + ) diff --git a/backend/core/quivr_core/api/modules/brain/service/brain_authorization_service.py b/backend/core/quivr_core/api/modules/brain/service/brain_authorization_service.py new file mode 100644 index 000000000000..72e989c00579 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/service/brain_authorization_service.py @@ -0,0 +1,78 @@ +from typing import List, Optional, Union +from uuid import UUID + +from fastapi import Depends, HTTPException, status + +from quivr_core.api.middlewares.auth.auth_bearer import get_current_user +from quivr_core.api.modules.brain.entity.brain_entity import RoleEnum +from quivr_core.api.modules.brain.service.brain_service import BrainService +from quivr_core.api.modules.brain.service.brain_user_service import BrainUserService +from quivr_core.api.modules.user.entity.user_identity import UserIdentity + +brain_user_service = BrainUserService() +brain_service = BrainService() + + +def has_brain_authorization( + required_roles: Optional[Union[RoleEnum, List[RoleEnum]]] = RoleEnum.Owner, +): + """ + Decorator to check if the user has the required role(s) for the brain + param: required_roles: The role(s) required to access the brain + return: A wrapper function that checks the authorization + """ + + async def wrapper( + brain_id: UUID, current_user: UserIdentity = Depends(get_current_user) + ): + nonlocal required_roles + if isinstance(required_roles, str): + required_roles = [required_roles] # Convert single role to a list + validate_brain_authorization( + brain_id=brain_id, user_id=current_user.id, required_roles=required_roles + ) + + return wrapper + + +def validate_brain_authorization( + brain_id: UUID, + user_id: UUID, + required_roles: Optional[Union[RoleEnum, List[RoleEnum]]] = RoleEnum.Owner, +): + """ + Function to check if the user has the required role(s) for the brain + param: brain_id: The id of the brain + param: user_id: The id of the user + param: required_roles: The role(s) required to access the brain + return: None + """ + + brain = brain_service.get_brain_details(brain_id, user_id) + + if brain and brain.status == "public": + return + + if required_roles is None: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Missing required role", + ) + + user_brain = brain_user_service.get_brain_for_user(user_id, brain_id) + if user_brain is None: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="You don't have permission for this brain", + ) + + # Convert single role to a list to handle both cases + if isinstance(required_roles, str): + required_roles = [required_roles] + + # Check if the user has at least one of the required roles + if user_brain.rights not in required_roles: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="You don't have the required role(s) for this brain", + ) diff --git a/backend/core/quivr_core/api/modules/brain/service/brain_service.py b/backend/core/quivr_core/api/modules/brain/service/brain_service.py new file mode 100644 index 000000000000..8c2a0a27c5e9 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/service/brain_service.py @@ -0,0 +1,370 @@ +from typing import Optional +from uuid import UUID + +from fastapi import HTTPException + +from quivr_core.api.celery_config import celery +from quivr_core.api.logger import get_logger +from quivr_core.api.modules.brain.dto.inputs import ( + BrainUpdatableProperties, + CreateBrainProperties, +) +from 
quivr_core.api.modules.brain.entity.brain_entity import ( + BrainEntity, + BrainType, + PublicBrain, +) +from quivr_core.api.modules.brain.entity.integration_brain import IntegrationEntity +from quivr_core.api.modules.brain.repository import ( + Brains, + BrainsUsers, + BrainsVectors, + IntegrationBrain, + IntegrationDescription, +) +from quivr_core.api.modules.brain.repository.composite_brains_connections import ( + CompositeBrainsConnections, +) +from quivr_core.api.modules.brain.repository.external_api_secrets import ( + ExternalApiSecrets, +) +from quivr_core.api.modules.brain.service.api_brain_definition_service import ( + ApiBrainDefinitionService, +) +from quivr_core.api.modules.brain.service.utils.validate_brain import validate_api_brain +from quivr_core.api.modules.knowledge.service.knowledge_service import KnowledgeService +from quivr_core.api.vectorstore.supabase import CustomSupabaseVectorStore + +logger = get_logger(__name__) + +knowledge_service = KnowledgeService() +api_brain_definition_service = ApiBrainDefinitionService() + + +class BrainService: + # brain_repository: BrainsInterface + # brain_user_repository: BrainsUsersInterface + # brain_vector_repository: BrainsVectorsInterface + # external_api_secrets_repository: ExternalApiSecretsInterface + # integration_brains_repository: IntegrationBrainInterface + # integration_description_repository: IntegrationDescriptionInterface + + def __init__(self): + self.brain_repository: Brains = Brains() + self.brain_user_repository = BrainsUsers() + self.brain_vector = BrainsVectors() + self.integration_brains_repository = IntegrationBrain() + self.integration_description_repository = IntegrationDescription() + self.composite_brains_connections_repository = CompositeBrainsConnections() + self.external_api_secrets_repository = ExternalApiSecrets() + + def get_brain_by_id(self, brain_id: UUID): + return self.brain_repository.get_brain_by_id(brain_id) + + def get_integration_brain(self, brain_id) -> IntegrationEntity | None: + return self.integration_brains_repository.get_integration_brain(brain_id) + + def find_brain_from_question( + self, + brain_id: UUID, + question: str, + user, + chat_id: UUID, + history, + vector_store: CustomSupabaseVectorStore, + ) -> tuple[Optional[BrainEntity], dict[str, str]]: + """Find the brain to use for a question. + + Args: + brain_id (UUID): ID of the brain to use if exists + question (str): Question for which to find the brain + user (UserEntity): User asking the question + chat_id (UUID): ID of the chat + history: Chat history; when no brain_id is given, its first entry supplies the question and brain + vector_store (CustomSupabaseVectorStore): Vector store used to find the brains closest to the question + + Returns: + tuple[Optional[BrainEntity], dict[str, str]]: The brain to use, if any, and metadata listing the closest brains + """ + metadata = {} + + # Init + brain_id_to_use = brain_id + brain_to_use = None + + list_brains = [] # To return + + if history and not brain_id_to_use: + question = history[0].user_message + brain_id_to_use = history[0].brain_id + brain_to_use = self.get_brain_by_id(brain_id_to_use) + + # If a brain_id is provided, use it + if brain_id_to_use and not brain_to_use: + brain_to_use = self.get_brain_by_id(brain_id_to_use) + + else: + # Calculate the closest brains to the question + list_brains = vector_store.find_brain_closest_query(user.id, question) + + unique_list_brains = [] + seen_brain_ids = set() + + for brain in list_brains: + if brain["id"] not in seen_brain_ids: + unique_list_brains.append(brain) + seen_brain_ids.add(brain["id"]) + + metadata["close_brains"] = unique_list_brains[:5] + + if list_brains and not brain_to_use: + brain_id_to_use = list_brains[0]["id"] + brain_to_use = self.get_brain_by_id(brain_id_to_use) + + return brain_to_use, metadata + + def create_brain( + self, + user_id: UUID, + brain: Optional[CreateBrainProperties], + ) -> BrainEntity: + if brain is None: + brain = CreateBrainProperties() # type: ignore + + if brain.brain_type == BrainType.api: + validate_api_brain(brain) + return self.create_brain_api(user_id, brain) + + if brain.brain_type == BrainType.composite: + return self.create_brain_composite(brain) + + if brain.brain_type == BrainType.integration: + return self.create_brain_integration(user_id, brain) + + created_brain = self.brain_repository.create_brain(brain) + return created_brain + + def create_brain_api( + self, + user_id: UUID, + brain: CreateBrainProperties, + ) -> BrainEntity: + created_brain = self.brain_repository.create_brain(brain) + + if brain.brain_definition is not None: + api_brain_definition_service.add_api_brain_definition( + brain_id=created_brain.brain_id, + api_brain_definition=brain.brain_definition, + ) + + secrets_values = brain.brain_secrets_values + + for secret_name in secrets_values: + self.external_api_secrets_repository.create_secret( + user_id=user_id, + brain_id=created_brain.brain_id, + secret_name=secret_name, + secret_value=secrets_values[secret_name], + ) + + return created_brain + + def create_brain_composite( + self, + brain: CreateBrainProperties, + ) -> BrainEntity: + created_brain = self.brain_repository.create_brain(brain) + + if brain.connected_brains_ids is not None: + for connected_brain_id in brain.connected_brains_ids: + self.composite_brains_connections_repository.connect_brain( + composite_brain_id=created_brain.brain_id, + connected_brain_id=connected_brain_id, + ) + + return created_brain + + def create_brain_integration( + self, + user_id: UUID, + brain: CreateBrainProperties, + ) -> BrainEntity: + created_brain = self.brain_repository.create_brain(brain) + if brain.integration is not None: + self.integration_brains_repository.add_integration_brain( + user_id=user_id, + brain_id=created_brain.brain_id, + integration_id=brain.integration.integration_id, + settings=brain.integration.settings, + ) + if ( + self.integration_description_repository.get_integration_description( + brain.integration.integration_id +
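+ # the catalog entry decides whether a connector-specific background sync
+ # must be scheduled right after creation (currently only Notion needs one)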
).integration_name.lower() + == "notion" + ): + celery.send_task( + "NotionConnectorLoad", + kwargs={"brain_id": created_brain.brain_id, "user_id": user_id}, + ) + return created_brain + + def delete_brain_secrets_values(self, brain_id: UUID) -> None: + brain_definition = api_brain_definition_service.get_api_brain_definition( + brain_id=brain_id + ) + + if brain_definition is None: + raise HTTPException(status_code=404, detail="Brain definition not found.") + + secrets = brain_definition.secrets + + if len(secrets) > 0: + brain_users = self.brain_user_repository.get_brain_users(brain_id=brain_id) + for user in brain_users: + for secret in secrets: + self.external_api_secrets_repository.delete_secret( + user_id=user.user_id, + brain_id=brain_id, + secret_name=secret.name, + ) + + def delete_brain(self, brain_id: UUID) -> dict[str, str]: + brain_to_delete = self.get_brain_by_id(brain_id=brain_id) + if brain_to_delete is None: + raise HTTPException(status_code=404, detail="Brain not found.") + + if brain_to_delete.brain_type == BrainType.api: + self.delete_brain_secrets_values( + brain_id=brain_id, + ) + api_brain_definition_service.delete_api_brain_definition(brain_id=brain_id) + else: + knowledge_service.remove_brain_all_knowledge(brain_id) + + self.brain_vector.delete_brain_vector(str(brain_id)) + self.brain_user_repository.delete_brain_users(str(brain_id)) + self.brain_repository.delete_brain(str(brain_id)) # type: ignore + + return {"message": "Brain deleted."} + + def get_brain_prompt_id(self, brain_id: UUID) -> UUID | None: + brain = self.get_brain_by_id(brain_id) + prompt_id = brain.prompt_id if brain else None + + return prompt_id + + def update_brain_by_id( + self, brain_id: UUID, brain_new_values: BrainUpdatableProperties + ) -> BrainEntity: + """Update a prompt by id""" + + existing_brain = self.brain_repository.get_brain_by_id(brain_id) + + if existing_brain is None: + raise HTTPException( + status_code=404, + detail=f"Brain with id {brain_id} not found", + ) + brain_update_answer = self.brain_repository.update_brain_by_id( + brain_id, + brain=BrainUpdatableProperties( + **brain_new_values.dict( + exclude={"brain_definition", "connected_brains_ids", "integration"} + ) + ), + ) + + if brain_update_answer is None: + raise HTTPException( + status_code=404, + detail=f"Brain with id {brain_id} not found", + ) + + if ( + brain_update_answer.brain_type == BrainType.api + and brain_new_values.brain_definition + ): + existing_brain_secrets_definition = ( + existing_brain.brain_definition.secrets + if existing_brain.brain_definition + else None + ) + brain_new_values_secrets_definition = ( + brain_new_values.brain_definition.secrets + if brain_new_values.brain_definition + else None + ) + should_remove_existing_secrets_values = ( + existing_brain_secrets_definition + and brain_new_values_secrets_definition + and existing_brain_secrets_definition + != brain_new_values_secrets_definition + ) + + if should_remove_existing_secrets_values: + self.delete_brain_secrets_values(brain_id=brain_id) + + api_brain_definition_service.update_api_brain_definition( + brain_id, + api_brain_definition=brain_new_values.brain_definition, + ) + + if brain_update_answer is None: + raise HTTPException( + status_code=404, + detail=f"Brain with id {brain_id} not found", + ) + + self.brain_repository.update_brain_last_update_time(brain_id) + return brain_update_answer + + def update_brain_last_update_time(self, brain_id: UUID): + self.brain_repository.update_brain_last_update_time(brain_id) + + def 
get_brain_details( + self, brain_id: UUID, user_id: UUID | None = None + ) -> BrainEntity | None: + brain = self.brain_repository.get_brain_details(brain_id) + if brain is None: + return None + + # TODO: N+1 here !! + if brain.brain_type == BrainType.integration: + brain.integration = ( + self.integration_brains_repository.get_integration_brain( + brain_id, user_id + ) + ) + + if brain.integration: + brain.integration_description = ( + self.integration_description_repository.get_integration_description( + brain.integration.integration_id + ) + ) + + return brain + + def get_connected_brains(self, brain_id: UUID) -> list[BrainEntity]: + return self.composite_brains_connections_repository.get_connected_brains( + brain_id + ) + + def get_public_brains(self) -> list[PublicBrain]: + return self.brain_repository.get_public_brains() + + def update_secret_value( + self, + user_id: UUID, + brain_id: UUID, + secret_name: str, + secret_value: str, + ) -> None: + """Update an existing secret.""" + self.external_api_secrets_repository.delete_secret( + user_id=user_id, + brain_id=brain_id, + secret_name=secret_name, + ) + self.external_api_secrets_repository.create_secret( + user_id=user_id, + brain_id=brain_id, + secret_name=secret_name, + secret_value=secret_value, + ) diff --git a/backend/core/quivr_core/api/modules/brain/service/brain_subscription/__init__.py b/backend/core/quivr_core/api/modules/brain/service/brain_subscription/__init__.py new file mode 100644 index 000000000000..efe9797fc7fa --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/service/brain_subscription/__init__.py @@ -0,0 +1,2 @@ +from .resend_invitation_email import resend_invitation_email +from .subscription_invitation_service import SubscriptionInvitationService diff --git a/backend/core/quivr_core/api/modules/brain/service/brain_subscription/resend_invitation_email.py b/backend/core/quivr_core/api/modules/brain/service/brain_subscription/resend_invitation_email.py new file mode 100644 index 000000000000..ffc0f17297a3 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/service/brain_subscription/resend_invitation_email.py @@ -0,0 +1,57 @@ +from uuid import UUID + +from quivr_core.api.logger import get_logger +from quivr_core.api.models.brains_subscription_invitations import BrainSubscription +from quivr_core.api.models.settings import BrainSettings +from quivr_core.api.modules.brain.service.brain_service import BrainService +from quivr_core.api.packages.emails.send_email import send_email + +logger = get_logger(__name__) + +brain_service = BrainService() + + +def get_brain_url(origin: str, brain_id: UUID) -> str: + """Generates the brain URL based on the brain_id.""" + + return f"{origin}/invitation/{brain_id}" + + +def resend_invitation_email( + brain_subscription: BrainSubscription, + inviter_email: str, + user_id: UUID, + origin: str = "https://chat.quivr.app", +): + brains_settings = BrainSettings() # pyright: ignore reportPrivateUsage=none + + brain_url = get_brain_url(origin, brain_subscription.brain_id) + + invitation_brain = brain_service.get_brain_details( + brain_subscription.brain_id, user_id + ) + if invitation_brain is None: + raise Exception("Brain not found") + brain_name = invitation_brain.name + + html_body = f""" +
+    <p>Brain {brain_name} has been shared with you by {inviter_email}.</p>
+    <p><a href="{brain_url}">Click here</a> to access your brain.</p>
+ """ + + try: + r = send_email( + { + "from": brains_settings.resend_email_address, + "to": [brain_subscription.email], + "subject": "Quivr - Brain Shared With You", + "reply_to": "no-reply@quivr.app", + "html": html_body, + } + ) + logger.info("Resend response", r) + except Exception as e: + logger.error(f"Error sending email: {e}") + return + + return r diff --git a/backend/core/quivr_core/api/modules/brain/service/brain_subscription/subscription_invitation_service.py b/backend/core/quivr_core/api/modules/brain/service/brain_subscription/subscription_invitation_service.py new file mode 100644 index 000000000000..84448ee379f7 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/service/brain_subscription/subscription_invitation_service.py @@ -0,0 +1,106 @@ +from quivr_core.api.logger import get_logger +from quivr_core.api.models.brains_subscription_invitations import BrainSubscription +from quivr_core.api.models.settings import get_supabase_client +from quivr_core.api.modules.brain.service.brain_user_service import BrainUserService +from quivr_core.api.modules.user.service.user_service import UserService + +logger = get_logger(__name__) + + +brain_user_service = BrainUserService() +user_service = UserService() + + +class SubscriptionInvitationService: + def __init__(self): + self.supabase_client = get_supabase_client() + + def create_subscription_invitation(self, brain_subscription: BrainSubscription): + logger.info("Creating subscription invitation") + response = ( + self.supabase_client.table("brain_subscription_invitations") + .insert( + { + "brain_id": str(brain_subscription.brain_id), + "email": brain_subscription.email, + "rights": brain_subscription.rights, + } + ) + .execute() + ) + return response.data + + def update_subscription_invitation(self, brain_subscription: BrainSubscription): + logger.info("Updating subscription invitation") + response = ( + self.supabase_client.table("brain_subscription_invitations") + .update({"rights": brain_subscription.rights}) + .eq("brain_id", str(brain_subscription.brain_id)) + .eq("email", brain_subscription.email) + .execute() + ) + return response.data + + def create_or_update_subscription_invitation( + self, + brain_subscription: BrainSubscription, + ) -> bool: + """ + Creates a subscription invitation if it does not exist, otherwise updates it. + Returns True if the invitation was created or updated and False if user already has access. 
+ """ + response = ( + self.supabase_client.table("brain_subscription_invitations") + .select("*") + .eq("brain_id", str(brain_subscription.brain_id)) + .eq("email", brain_subscription.email) + .execute() + ) + + if response.data: + self.update_subscription_invitation(brain_subscription) + return True + else: + user_id = user_service.get_user_id_by_email(brain_subscription.email) + brain_user = None + + if user_id is not None: + brain_id = brain_subscription.brain_id + brain_user = brain_user_service.get_brain_for_user(user_id, brain_id) + + if brain_user is None: + self.create_subscription_invitation(brain_subscription) + return True + + return False + + def fetch_invitation(self, subscription: BrainSubscription): + logger.info("Fetching subscription invitation") + response = ( + self.supabase_client.table("brain_subscription_invitations") + .select("*") + .eq("brain_id", str(subscription.brain_id)) + .eq("email", subscription.email) + .execute() + ) + if response.data: + return response.data[0] # return the first matching invitation + else: + return None + + def remove_invitation(self, subscription: BrainSubscription): + logger.info( + f"Removing subscription invitation for email {subscription.email} and brain {subscription.brain_id}" + ) + response = ( + self.supabase_client.table("brain_subscription_invitations") + .delete() + .eq("brain_id", str(subscription.brain_id)) + .eq("email", subscription.email) + .execute() + ) + logger.info( + f"Removed subscription invitation for email {subscription.email} and brain {subscription.brain_id}" + ) + logger.info(response) + return response.data diff --git a/backend/core/quivr_core/api/modules/brain/service/brain_user_service.py b/backend/core/quivr_core/api/modules/brain/service/brain_user_service.py new file mode 100644 index 000000000000..de07051bd34f --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/service/brain_user_service.py @@ -0,0 +1,116 @@ +from typing import List +from uuid import UUID + +from fastapi import HTTPException + +from quivr_core.api.logger import get_logger +from quivr_core.api.modules.brain.entity.brain_entity import ( + BrainEntity, + BrainType, + BrainUser, + MinimalUserBrainEntity, + RoleEnum, +) +from quivr_core.api.modules.brain.repository.brains import Brains +from quivr_core.api.modules.brain.repository.brains_users import BrainsUsers +from quivr_core.api.modules.brain.repository.external_api_secrets import ( + ExternalApiSecrets, +) +from quivr_core.api.modules.brain.repository.interfaces.brains_interface import ( + BrainsInterface, +) +from quivr_core.api.modules.brain.repository.interfaces.brains_users_interface import ( + BrainsUsersInterface, +) +from quivr_core.api.modules.brain.repository.interfaces.external_api_secrets_interface import ( + ExternalApiSecretsInterface, +) +from quivr_core.api.modules.brain.service.api_brain_definition_service import ( + ApiBrainDefinitionService, +) +from quivr_core.api.modules.brain.service.brain_service import BrainService + +logger = get_logger(__name__) + +brain_service = BrainService() +api_brain_definition_service = ApiBrainDefinitionService() + + +class BrainUserService: + brain_repository: BrainsInterface + brain_user_repository: BrainsUsersInterface + external_api_secrets_repository: ExternalApiSecretsInterface + + def __init__(self): + self.brain_repository = Brains() + self.brain_user_repository = BrainsUsers() + self.external_api_secrets_repository = ExternalApiSecrets() + + def get_user_default_brain(self, user_id: UUID) -> BrainEntity | None: + 
brain_id = self.brain_user_repository.get_user_default_brain_id(user_id) + + if brain_id is None: + return None + + return brain_service.get_brain_by_id(brain_id) + + def delete_brain_user(self, user_id: UUID, brain_id: UUID) -> None: + brain_to_delete_user_from = brain_service.get_brain_by_id(brain_id=brain_id) + if brain_to_delete_user_from is None: + raise HTTPException(status_code=404, detail="Brain not found.") + + if brain_to_delete_user_from.brain_type == BrainType.api: + brain_definition = api_brain_definition_service.get_api_brain_definition( + brain_id=brain_id + ) + if brain_definition is None: + raise HTTPException( + status_code=404, detail="Brain definition not found." + ) + secrets = brain_definition.secrets + for secret in secrets: + self.external_api_secrets_repository.delete_secret( + user_id=user_id, + brain_id=brain_id, + secret_name=secret.name, + ) + + self.brain_user_repository.delete_brain_user_by_id( + user_id=user_id, + brain_id=brain_id, + ) + + def delete_brain_users(self, brain_id: UUID) -> None: + # delegates to delete_brain_subscribers, which removes Viewer rows only + self.brain_user_repository.delete_brain_subscribers( + brain_id=brain_id, + ) + + def create_brain_user( + self, user_id: UUID, brain_id: UUID, rights: RoleEnum, is_default_brain: bool + ): + self.brain_user_repository.create_brain_user( + user_id=user_id, + brain_id=brain_id, + rights=rights, + default_brain=is_default_brain, + ) + + def get_brain_for_user(self, user_id: UUID, brain_id: UUID): + return self.brain_user_repository.get_brain_for_user(user_id, brain_id) # type: ignore + + def get_user_brains(self, user_id: UUID) -> list[MinimalUserBrainEntity]: + results = self.brain_user_repository.get_user_brains(user_id) # type: ignore + + return results # type: ignore + + def get_brain_users(self, brain_id: UUID) -> List[BrainUser]: + return self.brain_user_repository.get_brain_users(brain_id) + + def update_brain_user_rights( + self, brain_id: UUID, user_id: UUID, rights: str + ) -> None: + self.brain_user_repository.update_brain_user_rights( + brain_id=brain_id, + user_id=user_id, + rights=rights, + ) diff --git a/backend/core/quivr_core/api/modules/brain/service/brain_vector_service.py b/backend/core/quivr_core/api/modules/brain/service/brain_vector_service.py new file mode 100644 index 000000000000..90929c79d88a --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/service/brain_vector_service.py @@ -0,0 +1,63 @@ +from typing import Any, List +from uuid import UUID + +from quivr_core.api.logger import get_logger +from quivr_core.api.modules.brain.repository.brains_vectors import BrainsVectors +from quivr_core.api.modules.brain.repository.interfaces.brains_vectors_interface import ( + BrainsVectorsInterface, +) +from quivr_core.api.modules.knowledge.repository.storage import Storage +from quivr_core.api.packages.embeddings.vectors import get_unique_files_from_vector_ids

logger = get_logger(__name__) + + +class BrainVectorService: + repository: BrainsVectorsInterface + id: UUID + files: List[Any] + + def __init__(self, brain_id: UUID): + self.repository = BrainsVectors() + self.id = brain_id + self.files = [] + + def create_brain_vector(self, vector_id, file_sha1): + return self.repository.create_brain_vector(self.id, vector_id, file_sha1) # type: ignore + + def update_brain_with_file(self, file_sha1: str): + # not used + vector_ids = self.repository.get_vector_ids_from_file_sha1(file_sha1) + if vector_ids is None or len(vector_ids) == 0: + logger.info(f"No vector ids found for file {file_sha1}") + return + + for vector_id in vector_ids: +
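+ # link every vector already computed for this file's sha1 to the current
+ # brain, so the file can be shared across brains without re-embedding it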
self.create_brain_vector(vector_id, file_sha1) + + def get_unique_brain_files(self): + """ + Retrieve unique brain data (i.e. uploaded files and crawled websites). + """ + + vector_ids = self.repository.get_brain_vector_ids(self.id) # type: ignore + self.files = get_unique_files_from_vector_ids(vector_ids) + + return self.files + + def delete_file_from_brain(self, file_name: str, only_vectors: bool = False): + file_name_with_brain_id = f"{self.id}/{file_name}" + storage = Storage() + if not only_vectors: + storage.remove_file(file_name_with_brain_id) + return self.repository.delete_file_from_brain(self.id, file_name) # type: ignore + + def delete_file_url_from_brain(self, file_name: str): + return self.repository.delete_file_from_brain(self.id, file_name) # type: ignore + + @property + def brain_size(self): + # TODO: change the calculation of the brain size, calculate the size stored for the embeddings + what's in the storage + self.get_unique_brain_files() + current_brain_size = sum(float(doc["size"]) for doc in self.files) + + return current_brain_size diff --git a/backend/core/quivr_core/api/modules/brain/service/call_brain_api.py b/backend/core/quivr_core/api/modules/brain/service/call_brain_api.py new file mode 100644 index 000000000000..defddaf92fc1 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/service/call_brain_api.py @@ -0,0 +1,118 @@ +from uuid import UUID + +import requests +from fastapi import HTTPException + +from quivr_core.api.logger import get_logger +from quivr_core.api.modules.brain.entity.api_brain_definition_entity import ( + ApiBrainDefinitionSchema, +) +from quivr_core.api.modules.brain.service.api_brain_definition_service import ( + ApiBrainDefinitionService, +) +from quivr_core.api.modules.brain.service.brain_service import BrainService + +logger = get_logger(__name__) + +brain_service = BrainService() +api_brain_definition_service = ApiBrainDefinitionService() + + +def get_api_call_response_as_text( + method, api_url, params, search_params, secrets +) -> str | None: + headers = {} + + api_url_with_search_params = api_url + if search_params: + api_url_with_search_params += "?"
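+ # naive query-string build: values are appended unencoded and a trailing
+ # "&" is left in place; urllib.parse.urlencode would be the safer choice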
+ for search_param in search_params: + api_url_with_search_params += ( + f"{search_param}={search_params[search_param]}&" + ) + + for secret in secrets: + headers[secret] = secrets[secret] + + try: + if method in ["GET", "DELETE"]: + response = requests.request( + method, + url=api_url_with_search_params, + params=params or None, + headers=headers or None, + ) + elif method in ["POST", "PUT", "PATCH"]: + response = requests.request( + method, + url=api_url_with_search_params, + json=params or None, + headers=headers or None, + ) + else: + raise ValueError(f"Invalid method: {method}") + + return response.text + + except Exception as e: + logger.error(f"Error calling API: {e}") + return None + + +def extract_api_brain_definition_values_from_llm_output( + brain_schema: ApiBrainDefinitionSchema, arguments: dict +) -> dict: + params_values = {} + properties = brain_schema.properties + required_values = brain_schema.required + for property in properties: + if property.name in arguments: + if property.type == "number": + params_values[property.name] = float(arguments[property.name]) + else: + params_values[property.name] = arguments[property.name] + continue + + if property.name in required_values: + raise HTTPException( + status_code=400, + detail=f"Required parameter {property.name} not found in arguments", + ) + + return params_values + + +def call_brain_api(brain_id: UUID, user_id: UUID, arguments: dict) -> str: + brain_definition = api_brain_definition_service.get_api_brain_definition(brain_id) + + if brain_definition is None: + raise HTTPException( + status_code=404, detail=f"Brain definition {brain_id} not found" + ) + + brain_params_values = extract_api_brain_definition_values_from_llm_output( + brain_definition.params, arguments + ) + + brain_search_params_values = extract_api_brain_definition_values_from_llm_output( + brain_definition.search_params, arguments + ) + + secrets = brain_definition.secrets + secrets_values = {} + + for secret in secrets: + secret_value = brain_service.external_api_secrets_repository.read_secret( + user_id=user_id, brain_id=brain_id, secret_name=secret.name + ) + secrets_values[secret.name] = secret_value + + return get_api_call_response_as_text( + api_url=brain_definition.url, + params=brain_params_values, + search_params=brain_search_params_values, + secrets=secrets_values, + method=brain_definition.method, + ) diff --git a/backend/core/quivr_core/api/modules/brain/service/get_api_brain_definition_as_json_schema.py b/backend/core/quivr_core/api/modules/brain/service/get_api_brain_definition_as_json_schema.py new file mode 100644 index 000000000000..a5c90f9a209e --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/service/get_api_brain_definition_as_json_schema.py @@ -0,0 +1,65 @@ +import re + +from fastapi import HTTPException + +from quivr_core.api.modules.brain.entity.api_brain_definition_entity import ( + ApiBrainDefinitionSchemaProperty, +) +from quivr_core.api.modules.brain.entity.brain_entity import BrainEntity +from quivr_core.api.modules.brain.service.api_brain_definition_service import ( + ApiBrainDefinitionService, +) + +api_brain_definition_service = ApiBrainDefinitionService() + + +def sanitize_function_name(string): + sanitized_string = re.sub(r"[^a-zA-Z0-9_-]", "", string) + + return sanitized_string + + +def format_api_brain_property(property: ApiBrainDefinitionSchemaProperty): + property_data: dict = { + "type": property.type, + "description": property.description, + } + if property.enum: + property_data["enum"] = property.enum + 
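+ # yields a JSON-Schema property fragment, e.g.
+ # {"type": "string", "description": "...", "enum": ["GET", "POST"]},
+ # in the shape expected by OpenAI-style function calling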
return property_data + + +def get_api_brain_definition_as_json_schema(brain: BrainEntity): + api_brain_definition = api_brain_definition_service.get_api_brain_definition( + brain.id + ) + if not api_brain_definition: + raise HTTPException( + status_code=404, detail=f"Brain definition {brain.id} not found" + ) + + required = [] + required.extend(api_brain_definition.params.required) + required.extend(api_brain_definition.search_params.required) + properties = {} + + api_properties = ( + api_brain_definition.params.properties + + api_brain_definition.search_params.properties + ) + + for property in api_properties: + properties[property.name] = format_api_brain_property(property) + + parameters = { + "type": "object", + "properties": properties, + "required": required, + } + schema = { + "name": sanitize_function_name(brain.name), + "description": brain.description, + "parameters": parameters, + } + + return schema diff --git a/backend/core/quivr_core/api/modules/brain/service/get_question_context_from_brain.py b/backend/core/quivr_core/api/modules/brain/service/get_question_context_from_brain.py new file mode 100644 index 000000000000..01c04148eac3 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/service/get_question_context_from_brain.py @@ -0,0 +1,67 @@ +from uuid import UUID + +from attr import dataclass + +from quivr_core.api.logger import get_logger +from quivr_core.api.models.settings import get_embedding_client, get_supabase_client +from quivr_core.api.modules.upload.service.generate_file_signed_url import ( + generate_file_signed_url, +) +from quivr_core.api.vectorstore.supabase import CustomSupabaseVectorStore + +logger = get_logger(__name__) + + +@dataclass +class DocumentAnswer: + file_name: str + file_sha1: str + file_size: int + file_url: str = "" + file_id: str = "" + file_similarity: float = 0.0 + + +def get_question_context_from_brain(brain_id: UUID, question: str) -> str: + """Finds the best brain to answer the question based on the question's meaning. 
+
+    Args:
+        brain_id (UUID): Id of the brain to search in
+        question (str): Question to search for in the vector store
+
+    Returns:
+        list[DocumentAnswer]: The matching documents, with their file metadata
+            and a signed download URL for each file
+    """
+    # TODO: Move to AnswerGenerator service
+    supabase_client = get_supabase_client()
+    embeddings = get_embedding_client()
+
+    vector_store = CustomSupabaseVectorStore(
+        supabase_client,
+        embeddings,
+        table_name="vectors",
+        brain_id=str(brain_id),
+        number_docs=20,
+    )
+    documents = vector_store.similarity_search(question, k=20, threshold=0.8)
+
+    answers = []
+    file_sha1s = []
+    for document in documents:
+        if document.metadata["file_sha1"] not in file_sha1s:
+            file_sha1s.append(document.metadata["file_sha1"])
+            file_path_in_storage = f"{brain_id}/{document.metadata['file_name']}"
+            answers.append(
+                DocumentAnswer(
+                    file_name=document.metadata["file_name"],
+                    file_sha1=document.metadata["file_sha1"],
+                    file_size=document.metadata["file_size"],
+                    file_id=document.metadata["id"],
+                    file_similarity=document.metadata["similarity"],
+                    file_url=generate_file_signed_url(file_path_in_storage).get(
+                        "signedURL", ""
+                    ),
+                ),
+            )
+
+    return answers
diff --git a/backend/core/quivr_core/api/modules/brain/service/integration_brain_service.py b/backend/core/quivr_core/api/modules/brain/service/integration_brain_service.py
new file mode 100644
index 000000000000..7fef86900d4e
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/brain/service/integration_brain_service.py
@@ -0,0 +1,31 @@
+from quivr_core.api.modules.brain.entity.integration_brain import (
+    IntegrationDescriptionEntity,
+)
+from quivr_core.api.modules.brain.repository.integration_brains import (
+    IntegrationDescription,
+)
+from quivr_core.api.modules.brain.repository.interfaces import (
+    IntegrationDescriptionInterface,
+)
+
+
+class IntegrationBrainDescriptionService:
+    repository: IntegrationDescriptionInterface
+
+    def __init__(self):
+        self.repository = IntegrationDescription()
+
+    def get_all_integration_descriptions(self) -> list[IntegrationDescriptionEntity]:
+        return self.repository.get_all_integration_descriptions()
+
+    def get_integration_description(
+        self, integration_id
+    ) -> IntegrationDescriptionEntity:
+        return self.repository.get_integration_description(integration_id)
+
+    def get_integration_description_by_user_brain_id(
+        self, brain_id, user_id
+    ) -> IntegrationDescriptionEntity:
+        return self.repository.get_integration_description_by_user_brain_id(
+            brain_id, user_id
+        )
diff --git a/backend/core/quivr_core/api/modules/brain/service/test_brain_service.py b/backend/core/quivr_core/api/modules/brain/service/test_brain_service.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/backend/core/quivr_core/api/modules/brain/service/utils/__init__.py b/backend/core/quivr_core/api/modules/brain/service/utils/__init__.py
new file mode 100644
index 000000000000..1b5b3a524cf5
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/brain/service/utils/__init__.py
@@ -0,0 +1 @@
+from .validate_brain import validate_api_brain
diff --git a/backend/core/quivr_core/api/modules/brain/service/utils/format_chat_history.py b/backend/core/quivr_core/api/modules/brain/service/utils/format_chat_history.py
new file mode 100644
index 000000000000..3e3019432bdf
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/brain/service/utils/format_chat_history.py
@@ -0,0 +1,31 @@
+from typing import Dict, List, Tuple
+
+from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage
+
+from quivr_core.api.modules.chat.dto.outputs import
GetChatHistoryOutput + + +def format_chat_history( + history: List[GetChatHistoryOutput], +) -> List[Dict[str, str]]: + """Format the chat history into a list of HumanMessage and AIMessage""" + formatted_history = [] + for chat in history: + if chat.user_message: + formatted_history.append(HumanMessage(content=chat.user_message)) + if chat.assistant: + formatted_history.append(AIMessage(content=chat.assistant)) + return formatted_history + + +def format_history_to_openai_mesages( + tuple_history: List[Tuple[str, str]], system_message: str, question: str +) -> List[BaseMessage]: + """Format the chat history into a list of Base Messages""" + messages = [] + messages.append(SystemMessage(content=system_message)) + for human, ai in tuple_history: + messages.append(HumanMessage(content=human)) + messages.append(AIMessage(content=ai)) + messages.append(HumanMessage(content=question)) + return messages diff --git a/backend/core/quivr_core/api/modules/brain/service/utils/get_prompt_to_use_id.py b/backend/core/quivr_core/api/modules/brain/service/utils/get_prompt_to_use_id.py new file mode 100644 index 000000000000..f27f88fd5f17 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/service/utils/get_prompt_to_use_id.py @@ -0,0 +1,21 @@ +from typing import Optional +from uuid import UUID + +from quivr_core.api.modules.brain.service.brain_service import BrainService + +brain_service = BrainService() + + +def get_prompt_to_use_id( + brain_id: Optional[UUID], prompt_id: Optional[UUID] +) -> Optional[UUID]: + if brain_id is None and prompt_id is None: + return None + + return ( + prompt_id + if prompt_id + else brain_service.get_brain_prompt_id(brain_id) + if brain_id + else None + ) diff --git a/backend/core/quivr_core/api/modules/brain/service/utils/validate_brain.py b/backend/core/quivr_core/api/modules/brain/service/utils/validate_brain.py new file mode 100644 index 000000000000..faad3b819ca8 --- /dev/null +++ b/backend/core/quivr_core/api/modules/brain/service/utils/validate_brain.py @@ -0,0 +1,14 @@ +from fastapi import HTTPException + +from quivr_core.api.modules.brain.dto.inputs import CreateBrainProperties + + +def validate_api_brain(brain: CreateBrainProperties): + if brain.brain_definition is None: + raise HTTPException(status_code=404, detail="Brain definition not found") + + if brain.brain_definition.url is None: + raise HTTPException(status_code=404, detail="Brain url not found") + + if brain.brain_definition.method is None: + raise HTTPException(status_code=404, detail="Brain method not found") diff --git a/backend/core/quivr_core/api/modules/brain/tests/test_brains_interface.py b/backend/core/quivr_core/api/modules/brain/tests/test_brains_interface.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/chat/__init__.py b/backend/core/quivr_core/api/modules/chat/__init__.py new file mode 100644 index 000000000000..21252557e6b7 --- /dev/null +++ b/backend/core/quivr_core/api/modules/chat/__init__.py @@ -0,0 +1,6 @@ +# noqa: +from quivr_core.api.modules.brain.entity.brain_entity import Brain +from quivr_core.api.modules.prompt.entity.prompt import Prompt +from quivr_core.api.modules.user.entity.user_identity import User + +__all__ = ["Brain", "User", "Prompt"] diff --git a/backend/core/quivr_core/api/modules/chat/controller/__init__.py b/backend/core/quivr_core/api/modules/chat/controller/__init__.py new file mode 100644 index 000000000000..0ec06ba92835 --- /dev/null +++ 
b/backend/core/quivr_core/api/modules/chat/controller/__init__.py
@@ -0,0 +1 @@
+from .chat_routes import chat_router
diff --git a/backend/core/quivr_core/api/modules/chat/controller/chat/__init_.py b/backend/core/quivr_core/api/modules/chat/controller/chat/__init_.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/backend/core/quivr_core/api/modules/chat/controller/chat/brainful_chat.py b/backend/core/quivr_core/api/modules/chat/controller/chat/brainful_chat.py
new file mode 100644
index 000000000000..6dd65d1cb306
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/chat/controller/chat/brainful_chat.py
@@ -0,0 +1,113 @@
+from quivr_core.api.logger import get_logger
+from quivr_core.api.modules.brain.entity.brain_entity import BrainType, RoleEnum
+from quivr_core.api.modules.brain.integrations.Big.Brain import BigBrain
+from quivr_core.api.modules.brain.integrations.GPT4.Brain import GPT4Brain
+from quivr_core.api.modules.brain.integrations.Multi_Contract.Brain import (
+    MultiContractBrain,
+)
+from quivr_core.api.modules.brain.integrations.Notion.Brain import NotionBrain
+from quivr_core.api.modules.brain.integrations.Proxy.Brain import ProxyBrain
+from quivr_core.api.modules.brain.integrations.Self.Brain import SelfBrain
+from quivr_core.api.modules.brain.integrations.SQL.Brain import SQLBrain
+from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA
+from quivr_core.api.modules.brain.service.api_brain_definition_service import (
+    ApiBrainDefinitionService,
+)
+from quivr_core.api.modules.brain.service.brain_authorization_service import (
+    validate_brain_authorization,
+)
+from quivr_core.api.modules.brain.service.brain_service import BrainService
+from quivr_core.api.modules.brain.service.integration_brain_service import (
+    IntegrationBrainDescriptionService,
+)
+from quivr_core.api.modules.chat.controller.chat.interface import ChatInterface
+from quivr_core.api.modules.chat.service.chat_service import ChatService
+from quivr_core.api.modules.dependencies import get_service
+
+chat_service = get_service(ChatService)()
+api_brain_definition_service = ApiBrainDefinitionService()
+integration_brain_description_service = IntegrationBrainDescriptionService()
+
+logger = get_logger(__name__)
+
+models_supporting_function_calls = [
+    "gpt-4",
+    "gpt-4-1106-preview",
+    "gpt-4-0613",
+    "gpt-3.5-turbo-0125",
+    "gpt-3.5-turbo-1106",
+    "gpt-3.5-turbo-0613",
+    "gpt-4-0125-preview",
+    "gpt-3.5-turbo",
+    "gpt-4-turbo",
+    "gpt-4o",
+]
+
+
+integration_list = {
+    "notion": NotionBrain,
+    "gpt4": GPT4Brain,
+    "sql": SQLBrain,
+    "big": BigBrain,
+    "doc": KnowledgeBrainQA,
+    "proxy": ProxyBrain,
+    "self": SelfBrain,
+    "multi-contract": MultiContractBrain,
+}
+
+brain_service = BrainService()
+
+
+def validate_authorization(user_id, brain_id):
+    if brain_id:
+        validate_brain_authorization(
+            brain_id=brain_id,
+            user_id=user_id,
+            required_roles=[RoleEnum.Viewer, RoleEnum.Editor, RoleEnum.Owner],
+        )
+
+
+# TODO: redo this
+class BrainfulChat(ChatInterface):
+    def get_answer_generator(
+        self,
+        brain,
+        chat_id,
+        chat_service,
+        model,
+        temperature,
+        streaming,
+        prompt_id,
+        user_id,
+        user_email,
+    ):
+        if brain and brain.brain_type == BrainType.doc:
+            return KnowledgeBrainQA(
+                chat_service=chat_service,
+                chat_id=chat_id,
+                brain_id=str(brain.brain_id),
+                streaming=streaming,
+                prompt_id=prompt_id,
+                user_id=user_id,
+                user_email=user_email,
+            )
+
+        # Guard against a missing brain here as well, to avoid an AttributeError
+        if brain and brain.brain_type == BrainType.integration:
+            integration_brain = 
integration_brain_description_service.get_integration_description_by_user_brain_id(
+                brain.brain_id, user_id
+            )
+
+            integration_class = integration_list.get(
+                integration_brain.integration_name.lower()
+            )
+            if integration_class:
+                return integration_class(
+                    chat_service=chat_service,
+                    chat_id=chat_id,
+                    temperature=temperature,
+                    brain_id=str(brain.brain_id),
+                    streaming=streaming,
+                    prompt_id=prompt_id,
+                    user_id=user_id,
+                    user_email=user_email,
+                )
diff --git a/backend/core/quivr_core/api/modules/chat/controller/chat/brainless_chat.py b/backend/core/quivr_core/api/modules/chat/controller/chat/brainless_chat.py
new file mode 100644
index 000000000000..9021c7b99fba
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/chat/controller/chat/brainless_chat.py
@@ -0,0 +1,27 @@
+from llm.qa_headless import HeadlessQA
+
+from quivr_core.api.modules.chat.controller.chat.interface import ChatInterface
+
+
+class BrainlessChat(ChatInterface):
+    def validate_authorization(self, user_id, brain_id):
+        pass
+
+    def get_answer_generator(
+        self,
+        chat_id,
+        model,
+        max_tokens,
+        temperature,
+        streaming,
+        prompt_id,
+        user_id,
+    ):
+        return HeadlessQA(
+            chat_id=chat_id,
+            model=model,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            streaming=streaming,
+            prompt_id=prompt_id,
+        )
diff --git a/backend/core/quivr_core/api/modules/chat/controller/chat/factory.py b/backend/core/quivr_core/api/modules/chat/controller/chat/factory.py
new file mode 100644
index 000000000000..792328feeffc
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/chat/controller/chat/factory.py
@@ -0,0 +1,11 @@
+from uuid import UUID
+
+from .brainful_chat import BrainfulChat
+from .brainless_chat import BrainlessChat
+
+
+def get_chat_strategy(brain_id: UUID | None = None):
+    if brain_id:
+        return BrainfulChat()
+    else:
+        return BrainlessChat()
diff --git a/backend/core/quivr_core/api/modules/chat/controller/chat/interface.py b/backend/core/quivr_core/api/modules/chat/controller/chat/interface.py
new file mode 100644
index 000000000000..a0bbc95c89c5
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/chat/controller/chat/interface.py
@@ -0,0 +1,17 @@
+from abc import ABC, abstractmethod
+
+
+class ChatInterface(ABC):
+    @abstractmethod
+    def get_answer_generator(
+        self,
+        chat_id,
+        model,
+        max_tokens,
+        temperature,
+        streaming,
+        prompt_id,
+        user_id,
+        chat_question,
+    ):
+        pass
diff --git a/backend/core/quivr_core/api/modules/chat/controller/chat/test_utils.py b/backend/core/quivr_core/api/modules/chat/controller/chat/test_utils.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/backend/core/quivr_core/api/modules/chat/controller/chat/utils.py b/backend/core/quivr_core/api/modules/chat/controller/chat/utils.py
new file mode 100644
index 000000000000..e0c7b6df4dcc
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/chat/controller/chat/utils.py
@@ -0,0 +1,110 @@
+import time
+from uuid import UUID
+
+from fastapi import HTTPException
+
+from quivr_core.api.logger import get_logger
+from quivr_core.api.models.databases.llm_models import LLMModel
+from quivr_core.api.modules.user.service.user_usage import UserUsage
+
+logger = get_logger(__name__)
+
+
+# Pydantic v1-style custom type: coerces empty or malformed strings to None
+class NullableUUID(UUID):
+    @classmethod
+    def __get_validators__(cls):
+        yield cls.validate
+
+    @classmethod
+    def validate(cls, v, values=None, **kwargs):
+        logger.info(f"Validating UUID: {v}")
+        if v == "":
+            return None
+        try:
+            return UUID(v)
+        except ValueError:
+            return None
+
+
+# TODO: rewrite
+def compute_cost(model_to_use, models_settings):
+    model = model_to_use.name
+    user_chosen_model_price = 1000
+    for model_setting in models_settings:
+        if model_setting["name"] == model:
+            user_chosen_model_price = model_setting["price"]
+    return user_chosen_model_price
+
+
+# TODO: rewrite
+def find_model_and_generate_metadata(
+    brain_model: str | None,
+    user_settings,
+    models_settings,
+):
+    # Default model is gpt-3.5-turbo-0125
+    default_model = "gpt-3.5-turbo-0125"
+    model_to_use = LLMModel(  # TODO Implement default models in database
+        name=default_model, price=1, max_input=4000, max_output=1000
+    )
+
+    logger.debug("Brain model: %s", brain_model)
+
+    # If brain.model is None, set it to the default_model
+    if brain_model is None:
+        brain_model = default_model
+
+    is_brain_model_available = any(
+        brain_model == model_dict.get("name") for model_dict in models_settings
+    )
+
+    is_user_allowed_model = brain_model in user_settings.get(
+        "models", [default_model]
+    )  # Checks if the model is in the list of models allowed for this user
+
+    logger.debug(f"Brain model: {brain_model}")
+    logger.debug(f"User models: {user_settings.get('models', [])}")
+    logger.debug(f"Model available: {is_brain_model_available}")
+    logger.debug(f"User allowed model: {is_user_allowed_model}")
+
+    if is_brain_model_available and is_user_allowed_model:
+        # Use the model from the brain
+        model_to_use.name = brain_model
+        for model_dict in models_settings:
+            if model_dict.get("name") == model_to_use.name:
+                model_to_use.price = model_dict.get("price")
+                model_to_use.max_input = model_dict.get("max_input")
+                model_to_use.max_output = model_dict.get("max_output")
+                break
+
+    logger.info(f"Model to use: {model_to_use}")
+
+    return model_to_use
+
+
+def update_user_usage(usage: UserUsage, user_settings, cost: int = 100):
+    """Checks the user's request limit and raises an exception if it has been reached.
+    By default, a user has a limit of 100 requests per month; the limit can be increased by upgrading the plan.
+
+    Args:
+        usage (UserUsage): Usage tracker for the current user
+        user_settings (dict): User settings, including the monthly chat credit
+        cost (int): Cost of the current request, counted against the monthly credit
+
+    Raises:
+        HTTPException: Raises a 429 error if the user has reached the limit.
+    """
+
+    date = time.strftime("%Y%m%d")
+
+    monthly_chat_credit = user_settings.get("monthly_chat_credit", 100)
+    monthly_usage = usage.get_user_monthly_usage(date)
+
+    if int(monthly_usage + cost) > int(monthly_chat_credit):
+        raise HTTPException(
+            status_code=429,  # pyright: ignore reportPrivateUsage=none
+            detail=f"You have reached your monthly chat limit of {monthly_chat_credit} requests per month.
Please upgrade your plan to increase your monthly chat limit.", + ) + else: + usage.handle_increment_user_request_count(date, cost) + pass diff --git a/backend/core/quivr_core/api/modules/chat/controller/chat_routes.py b/backend/core/quivr_core/api/modules/chat/controller/chat_routes.py new file mode 100644 index 000000000000..3dafe6ace34b --- /dev/null +++ b/backend/core/quivr_core/api/modules/chat/controller/chat_routes.py @@ -0,0 +1,300 @@ +from typing import Annotated, List, Optional +from uuid import UUID + +from fastapi import APIRouter, Depends, HTTPException, Query, Request +from fastapi.responses import StreamingResponse + +from quivr_core.api.logger import get_logger +from quivr_core.api.middlewares.auth import AuthBearer, get_current_user +from quivr_core.api.models.settings import get_embedding_client, get_supabase_client +from quivr_core.api.modules.brain.service.brain_service import BrainService +from quivr_core.api.modules.chat.controller.chat.brainful_chat import ( + BrainfulChat, + validate_authorization, +) +from quivr_core.api.modules.chat.dto.chats import ChatItem, ChatQuestion +from quivr_core.api.modules.chat.dto.inputs import ( + ChatMessageProperties, + ChatUpdatableProperties, + CreateChatProperties, + QuestionAndAnswer, +) +from quivr_core.api.modules.chat.entity.chat import Chat +from quivr_core.api.modules.chat.service.chat_service import ChatService +from quivr_core.api.modules.dependencies import get_service +from quivr_core.api.modules.knowledge.repository.knowledges import KnowledgeRepository +from quivr_core.api.modules.prompt.service.prompt_service import PromptService +from quivr_core.api.modules.user.entity.user_identity import UserIdentity +from quivr_core.api.packages.quivr_core.rag_service import RAGService +from quivr_core.api.packages.utils.telemetry import maybe_send_telemetry +from quivr_core.api.vectorstore.supabase import CustomSupabaseVectorStore + +logger = get_logger(__name__) + +chat_router = APIRouter() +brain_service = BrainService() +knowledge_service = KnowledgeRepository() +prompt_service = PromptService() + + +ChatServiceDep = Annotated[ChatService, Depends(get_service(ChatService))] +UserIdentityDep = Annotated[UserIdentity, Depends(get_current_user)] + + +def init_vector_store(user_id: UUID) -> CustomSupabaseVectorStore: + """ + Initialize the vector store + """ + supabase_client = get_supabase_client() + embedding_service = get_embedding_client() + vector_store = CustomSupabaseVectorStore( + supabase_client, embedding_service, table_name="vectors", user_id=user_id + ) + + return vector_store + + +async def get_answer_generator( + chat_id: UUID, + chat_question: ChatQuestion, + chat_service: ChatService, + brain_id: UUID | None, + current_user: UserIdentity, +): + chat_instance = BrainfulChat() + vector_store = init_vector_store(user_id=current_user.id) + + # Get History only if needed + if not brain_id: + history = await chat_service.get_chat_history(chat_id) + else: + history = [] + + # TODO(@aminediro) : NOT USED anymore + brain, metadata_brain = brain_service.find_brain_from_question( + brain_id, chat_question.question, current_user, chat_id, history, vector_store + ) + gpt_answer_generator = chat_instance.get_answer_generator( + brain=brain, + chat_id=str(chat_id), + chat_service=chat_service, + model=brain.model, + temperature=0.1, + streaming=True, + prompt_id=chat_question.prompt_id, + user_id=current_user.id, + user_email=current_user.email, + ) + + return gpt_answer_generator + + +@chat_router.get("/chat/healthz", 
tags=["Health"]) +async def healthz(): + return {"status": "ok"} + + +# get all chats +@chat_router.get("/chat", dependencies=[Depends(AuthBearer())], tags=["Chat"]) +async def get_chats(current_user: UserIdentityDep, chat_service: ChatServiceDep): + """ + Retrieve all chats for the current user. + + - `current_user`: The current authenticated user. + - Returns a list of all chats for the user. + + This endpoint retrieves all the chats associated with the current authenticated user. It returns a list of chat objects + containing the chat ID and chat name for each chat. + """ + chats = await chat_service.get_user_chats(current_user.id) + return {"chats": chats} + + +# delete one chat +@chat_router.delete( + "/chat/{chat_id}", dependencies=[Depends(AuthBearer())], tags=["Chat"] +) +async def delete_chat(chat_id: UUID, chat_service: ChatServiceDep): + """ + Delete a specific chat by chat ID. + """ + + chat_service.delete_chat_from_db(chat_id) + return {"message": f"{chat_id} has been deleted."} + + +# update existing chat metadata +@chat_router.put( + "/chat/{chat_id}/metadata", dependencies=[Depends(AuthBearer())], tags=["Chat"] +) +async def update_chat_metadata_handler( + chat_data: ChatUpdatableProperties, + chat_id: UUID, + current_user: UserIdentityDep, + chat_service: ChatServiceDep, +): + """ + Update chat attributes + """ + + chat = await chat_service.get_chat_by_id(chat_id) + if str(current_user.id) != chat.user_id: + raise HTTPException( + status_code=403, # pyright: ignore reportPrivateUsage=none + detail="You should be the owner of the chat to update it.", # pyright: ignore reportPrivateUsage=none + ) + return chat_service.update_chat(chat_id=chat_id, chat_data=chat_data) + + +# update existing message +@chat_router.put("/chat/{chat_id}/{message_id}", tags=["Chat"]) +async def update_chat_message( + chat_message_properties: ChatMessageProperties, + chat_id: UUID, + message_id: UUID, + current_user: UserIdentityDep, + chat_service: ChatServiceDep, +): + chat = await chat_service.get_chat_by_id( + chat_id # pyright: ignore reportPrivateUsage=none + ) + if str(current_user.id) != chat.user_id: + raise HTTPException( + status_code=403, # pyright: ignore reportPrivateUsage=none + detail="You should be the owner of the chat to update it.", # pyright: ignore reportPrivateUsage=none + ) + return chat_service.update_chat_message( + chat_id=chat_id, + message_id=message_id, + chat_message_properties=chat_message_properties.dict(), + ) + + +# create new chat +@chat_router.post("/chat", dependencies=[Depends(AuthBearer())], tags=["Chat"]) +async def create_chat_handler( + chat_data: CreateChatProperties, + current_user: UserIdentityDep, + chat_service: ChatServiceDep, +): + """ + Create a new chat with initial chat messages. 
+ """ + + return await chat_service.create_chat( + user_id=current_user.id, new_chat_data=chat_data + ) + + +# add new question to chat +@chat_router.post( + "/chat/{chat_id}/question", + dependencies=[ + Depends( + AuthBearer(), + ), + ], + tags=["Chat"], +) +async def create_question_handler( + request: Request, + chat_question: ChatQuestion, + chat_id: UUID, + current_user: UserIdentityDep, + chat_service: ChatServiceDep, + brain_id: Annotated[UUID | None, Query()] = None, +): + # TODO: check logic into middleware + validate_authorization(user_id=current_user.id, brain_id=brain_id) + try: + rag_service = RAGService( + current_user, + brain_id, + chat_id, + brain_service, + prompt_service, + chat_service, + knowledge_service, + ) + chat_answer = await rag_service.generate_answer(chat_question.question) + + maybe_send_telemetry("question_asked", {"streaming": False}, request) + return chat_answer + + except AssertionError: + raise HTTPException( + status_code=422, + detail="inprocessable entity", + ) + except HTTPException as e: + raise e + + +# stream new question response from chat +@chat_router.post( + "/chat/{chat_id}/question/stream", + dependencies=[ + Depends( + AuthBearer(), + ), + ], + tags=["Chat"], +) +async def create_stream_question_handler( + request: Request, + chat_question: ChatQuestion, + chat_id: UUID, + chat_service: ChatServiceDep, + current_user: UserIdentityDep, + brain_id: Annotated[UUID | None, Query()] = None, +) -> StreamingResponse: + validate_authorization(user_id=current_user.id, brain_id=brain_id) + + logger.info( + f"Creating question for chat {chat_id} with brain {brain_id} of type {type(brain_id)}" + ) + + rag_service = RAGService( + current_user, + brain_id, + chat_id, + brain_service, + prompt_service, + chat_service, + knowledge_service, + ) + maybe_send_telemetry("question_asked", {"streaming": True}, request) + + return StreamingResponse( + rag_service.generate_answer_stream(chat_question.question), + media_type="text/event-stream", + ) + + +# get chat history +@chat_router.get( + "/chat/{chat_id}/history", dependencies=[Depends(AuthBearer())], tags=["Chat"] +) +async def get_chat_history_handler( + chat_id: UUID, + chat_service: ChatServiceDep, +) -> List[ChatItem]: + return await chat_service.get_chat_history_with_notifications(chat_id) + + +@chat_router.post( + "/chat/{chat_id}/question/answer", + dependencies=[Depends(AuthBearer())], + tags=["Chat"], +) +async def add_question_and_answer_handler( + chat_id: UUID, + question_and_answer: QuestionAndAnswer, + chat_service: ChatServiceDep, +) -> Optional[Chat]: + """ + Add a new question and anwser to the chat. + """ + history = await chat_service.add_question_and_answer(chat_id, question_and_answer) + # TODO(@aminediro) : Do we need to return the chat ?? 
+ return history.chat diff --git a/backend/core/quivr_core/api/modules/chat/dto/__init__.py b/backend/core/quivr_core/api/modules/chat/dto/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/chat/dto/chats.py b/backend/core/quivr_core/api/modules/chat/dto/chats.py new file mode 100644 index 000000000000..f499cf006f80 --- /dev/null +++ b/backend/core/quivr_core/api/modules/chat/dto/chats.py @@ -0,0 +1,47 @@ +from enum import Enum +from typing import List, Optional, Tuple, Union +from uuid import UUID + +from pydantic import BaseModel + +from quivr_core.api.modules.chat.dto.outputs import GetChatHistoryOutput +from quivr_core.api.modules.notification.entity.notification import Notification + + +class ChatMessage(BaseModel): + model: str + question: str + # A list of tuples where each tuple is (speaker, text) + history: List[Tuple[str, str]] + temperature: float = 0.0 + max_tokens: int = 256 + use_summarization: bool = False + chat_id: Optional[UUID] = None + chat_name: Optional[str] = None + + +class ChatQuestion(BaseModel): + question: str + model: Optional[str] = None + temperature: Optional[float] = None + max_tokens: Optional[int] = None + brain_id: Optional[UUID] = None + prompt_id: Optional[UUID] = None + + +class Sources(BaseModel): + name: str + source_url: str + type: str + original_file_name: str + citation: str + + +class ChatItemType(Enum): + MESSAGE = "MESSAGE" + NOTIFICATION = "NOTIFICATION" + + +class ChatItem(BaseModel): + item_type: ChatItemType + body: Union[GetChatHistoryOutput, Notification] diff --git a/backend/core/quivr_core/api/modules/chat/dto/inputs.py b/backend/core/quivr_core/api/modules/chat/dto/inputs.py new file mode 100644 index 000000000000..d3acab722a2a --- /dev/null +++ b/backend/core/quivr_core/api/modules/chat/dto/inputs.py @@ -0,0 +1,46 @@ +from dataclasses import dataclass +from typing import Optional +from uuid import UUID + +from pydantic import BaseModel + + +class CreateChatHistory(BaseModel): + chat_id: UUID + user_message: str + assistant: str + prompt_id: Optional[UUID] = None + brain_id: Optional[UUID] = None + metadata: Optional[dict] = {} + + +class QuestionAndAnswer(BaseModel): + question: str + answer: str + + +@dataclass +class CreateChatProperties: + name: str + + def __init__(self, name: str): + self.name = name + + +@dataclass +class ChatUpdatableProperties: + chat_name: Optional[str] = None + + def __init__(self, chat_name: Optional[str]): + self.chat_name = chat_name + + +class ChatMessageProperties(BaseModel, extra="ignore"): + thumbs: Optional[bool] + + def dict(self, *args, **kwargs): + chat_dict = super().dict(*args, **kwargs) + if chat_dict.get("thumbs"): + # Set thumbs to boolean value or None if not present + chat_dict["thumbs"] = bool(chat_dict["thumbs"]) + return chat_dict diff --git a/backend/core/quivr_core/api/modules/chat/dto/outputs.py b/backend/core/quivr_core/api/modules/chat/dto/outputs.py new file mode 100644 index 000000000000..86ee1fd6adba --- /dev/null +++ b/backend/core/quivr_core/api/modules/chat/dto/outputs.py @@ -0,0 +1,54 @@ +from datetime import datetime +from typing import List, Optional +from uuid import UUID + +from pydantic import BaseModel + + +class GetChatHistoryOutput(BaseModel): + chat_id: UUID + message_id: UUID + user_message: str + message_time: datetime + assistant: str | None = None + prompt_title: str | None = None + brain_name: str | None = None + brain_id: UUID | None = None # string because UUID is not JSON serializable + 
metadata: Optional[dict] = None
+    thumbs: Optional[bool] = None
+
+    def dict(self, *args, **kwargs):
+        chat_history = super().dict(*args, **kwargs)
+        chat_history["chat_id"] = str(chat_history.get("chat_id"))
+        chat_history["message_id"] = str(chat_history.get("message_id"))
+
+        return chat_history
+
+
+class FunctionCall(BaseModel):
+    arguments: str
+    name: str
+
+
+class ChatCompletionMessageToolCall(BaseModel):
+    id: str
+    function: FunctionCall
+    type: str = "function"
+
+
+class CompletionMessage(BaseModel):
+    # = "assistant" | "user" | "system" | "tool"
+    role: str
+    content: str | None = None
+    tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None
+
+
+class CompletionResponse(BaseModel):
+    finish_reason: str
+    message: CompletionMessage
+
+
+class BrainCompletionOutput(BaseModel):
+    messages: List[CompletionMessage]
+    question: str
+    response: CompletionResponse
diff --git a/backend/core/quivr_core/api/modules/chat/entity/__init__.py b/backend/core/quivr_core/api/modules/chat/entity/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/backend/core/quivr_core/api/modules/chat/entity/chat.py b/backend/core/quivr_core/api/modules/chat/entity/chat.py
new file mode 100644
index 000000000000..53720f344325
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/chat/entity/chat.py
@@ -0,0 +1,81 @@
+from datetime import datetime
+from typing import List
+from uuid import UUID
+
+from sqlalchemy.ext.asyncio import AsyncAttrs
+from sqlmodel import JSON, TIMESTAMP, Column, Field, Relationship, SQLModel, text
+from sqlmodel import UUID as PGUUID
+
+from quivr_core.api.modules.brain.entity.brain_entity import Brain
+from quivr_core.api.modules.user.entity.user_identity import User
+
+
+class Chat(SQLModel, table=True):
+    __tablename__ = "chats"  # type: ignore
+    chat_id: UUID | None = Field(
+        default=None,
+        sa_column=Column(
+            PGUUID,
+            server_default=text("uuid_generate_v4()"),
+            primary_key=True,
+        ),
+    )
+    chat_name: str | None
+    creation_time: datetime | None = Field(
+        default=None,
+        sa_column=Column(
+            TIMESTAMP(timezone=False),
+            server_default=text("CURRENT_TIMESTAMP"),
+        ),
+    )
+    user_id: UUID | None = Field(default=None, foreign_key="users.id")
+    user: User | None = Relationship(back_populates="chats")  # type: ignore
+    chat_history: List["ChatHistory"] | None = Relationship(back_populates="chat")  # type: ignore
+
+
+class ChatHistory(AsyncAttrs, SQLModel, table=True):
+    __tablename__ = "chat_history"  # type: ignore
+
+    message_id: UUID | None = Field(
+        default=None,
+        sa_column=Column(
+            PGUUID,
+            server_default=text("uuid_generate_v4()"),
+            primary_key=True,
+        ),
+    )
+    chat_id: UUID | None = Field(
+        default=None,
+        foreign_key="chats.chat_id",
+        primary_key=True,
+        nullable=False,  # Added nullable constraint
+    )
+    chat: Chat | None = Relationship(
+        back_populates="chat_history", sa_relationship_kwargs={"lazy": "select"}
+    )  # type: ignore
+    user_message: str | None = None
+    assistant: str | None = None
+    message_time: datetime | None = Field(
+        default=None,
+        sa_column=Column(
+            TIMESTAMP(timezone=False),
+            server_default=text("CURRENT_TIMESTAMP"),
+        ),
+    )
+    metadata_: dict | None = Field(
+        default=None, sa_column=Column("metadata", JSON, default=None)
+    )
+    prompt_id: UUID | None = Field(default=None, foreign_key="prompts.id")
+    brain_id: UUID | None = Field(
+        default=None,
+        foreign_key="brains.brain_id",
+    )
+
+    thumbs: bool | None = None
+    brain: Brain | None = Relationship(
back_populates="brain_chat_history", sa_relationship_kwargs={"lazy": "select"} + ) # type: ignore + + class Config: + # Note: Pydantic can't generate schema for arbitrary types + arbitrary_types_allowed = True diff --git a/backend/core/quivr_core/api/modules/chat/repository/__init__.py b/backend/core/quivr_core/api/modules/chat/repository/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/chat/repository/chats.py b/backend/core/quivr_core/api/modules/chat/repository/chats.py new file mode 100644 index 000000000000..4754be796f69 --- /dev/null +++ b/backend/core/quivr_core/api/modules/chat/repository/chats.py @@ -0,0 +1,127 @@ +from typing import Sequence +from uuid import UUID + +from sqlalchemy import exc +from sqlmodel import select +from sqlmodel.ext.asyncio.session import AsyncSession + +from quivr_core.api.models.settings import get_supabase_client +from quivr_core.api.modules.chat.dto.inputs import ( + ChatMessageProperties, + QuestionAndAnswer, +) +from quivr_core.api.modules.chat.entity.chat import Chat, ChatHistory +from quivr_core.api.modules.dependencies import BaseRepository + + +class ChatRepository(BaseRepository): + def __init__(self, session: AsyncSession): + super().__init__(session) + # TODO: for now use it instead of session + self.db = get_supabase_client() + + async def get_user_chats(self, user_id: UUID) -> Sequence[Chat]: + query = select(Chat).where(Chat.user_id == user_id) + response = await self.session.exec(query) + return response.all() + + async def create_chat(self, new_chat: Chat) -> Chat: + try: + self.session.add(new_chat) + await self.session.commit() + except exc.IntegrityError: + await self.session.rollback() + # TODO(@aminediro): Custom exceptions + raise Exception() + + await self.session.refresh(new_chat) + return new_chat + + async def get_chat_by_id(self, chat_id: UUID): + query = select(Chat).where(Chat.chat_id == chat_id) + response = await self.session.exec(query) + return response.one() + + async def get_chat_history(self, chat_id: UUID) -> Sequence[ChatHistory]: + query = ( + select(ChatHistory) + .where(ChatHistory.chat_id == chat_id) + # TODO: type hints of sqlmodel arent stable for order_by + .order_by(ChatHistory.message_time) # type: ignore + ) + response = await self.session.exec(query) + return response.all() + + async def add_question_and_answer( + self, chat_id: UUID, question_and_answer: QuestionAndAnswer + ) -> ChatHistory: + chat = ChatHistory( + chat_id=chat_id, + user_message=question_and_answer.question, + assistant=question_and_answer.answer, + ) + try: + self.session.add(chat) + await self.session.commit() + except exc.IntegrityError: + await self.session.rollback() + # TODO(@aminediro) : for now, build an exception system + raise Exception("can't create chat_history ") + await self.session.refresh(chat) + return chat + + def update_chat_history(self, chat_history): + response = ( + self.db.table("chat_history") + .insert( + { + "chat_id": str(chat_history.chat_id), + "user_message": chat_history.user_message, + "assistant": chat_history.assistant, + "prompt_id": ( + str(chat_history.prompt_id) if chat_history.prompt_id else None + ), + "brain_id": ( + str(chat_history.brain_id) if chat_history.brain_id else None + ), + "metadata": chat_history.metadata if chat_history.metadata else {}, + } + ) + .execute() + ) + return response + + def update_chat(self, chat_id, updates): + response = ( + self.db.table("chats").update(updates).match({"chat_id": chat_id}).execute() 
+ ) + + return response + + def update_message_by_id(self, message_id, updates): + response = ( + self.db.table("chat_history") + .update(updates) + .match({"message_id": message_id}) + .execute() + ) + + return response + + def delete_chat(self, chat_id): + self.db.table("chats").delete().match({"chat_id": chat_id}).execute() + + def delete_chat_history(self, chat_id): + self.db.table("chat_history").delete().match({"chat_id": chat_id}).execute() + + def update_chat_message( + self, chat_id, message_id, chat_message_properties: ChatMessageProperties + ): + response = ( + self.db.table("chat_history") + .update(chat_message_properties) + .match({"message_id": message_id, "chat_id": chat_id}) + .execute() + ) + + return response diff --git a/backend/core/quivr_core/api/modules/chat/repository/chats_interface.py b/backend/core/quivr_core/api/modules/chat/repository/chats_interface.py new file mode 100644 index 000000000000..aff734cd5c9e --- /dev/null +++ b/backend/core/quivr_core/api/modules/chat/repository/chats_interface.py @@ -0,0 +1,93 @@ +from abc import ABC, abstractmethod +from typing import Optional +from uuid import UUID + +from quivr_core.api.modules.chat.dto.inputs import ( + ChatMessageProperties, + CreateChatHistory, + QuestionAndAnswer, +) +from quivr_core.api.modules.chat.entity.chat import Chat + + +class ChatsInterface(ABC): + @abstractmethod + def create_chat(self, new_chat): + """ + Insert a chat entry in "chats" db + """ + pass + + @abstractmethod + def get_chat_by_id(self, chat_id: str): + """ + Get chat details by chat_id + """ + pass + + @abstractmethod + def add_question_and_answer( + self, chat_id: UUID, question_and_answer: QuestionAndAnswer + ) -> Optional[Chat]: + """ + Add a question and answer to the chat history + """ + pass + + @abstractmethod + def get_chat_history(self, chat_id: str): + """ + Get chat history by chat_id + """ + pass + + @abstractmethod + def get_user_chats(self, user_id: str): + """ + Get all chats for a user + """ + pass + + @abstractmethod + def update_chat_history(self, chat_history: CreateChatHistory): + """ + Update chat history + """ + pass + + @abstractmethod + def update_chat(self, chat_id, updates): + """ + Update chat details + """ + pass + + @abstractmethod + def update_message_by_id(self, message_id, updates): + """ + Update message details + """ + pass + + @abstractmethod + def delete_chat(self, chat_id): + """ + Delete chat + """ + pass + + @abstractmethod + def delete_chat_history(self, chat_id): + """ + Delete chat history + """ + pass + + @abstractmethod + def update_chat_message( + self, chat_id, message_id, chat_message_properties: ChatMessageProperties + ): + """ + Update chat message + """ + pass diff --git a/backend/core/quivr_core/api/modules/chat/service/__init__.py b/backend/core/quivr_core/api/modules/chat/service/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/chat/service/chat_service.py b/backend/core/quivr_core/api/modules/chat/service/chat_service.py new file mode 100644 index 000000000000..b533be9344f6 --- /dev/null +++ b/backend/core/quivr_core/api/modules/chat/service/chat_service.py @@ -0,0 +1,209 @@ +import random +from typing import List +from uuid import UUID + +from fastapi import HTTPException + +from quivr_core.api.logger import get_logger +from quivr_core.api.modules.brain.entity.brain_entity import Brain +from quivr_core.api.modules.brain.service.brain_service import BrainService +from quivr_core.api.modules.chat.dto.chats import 
ChatItem
+from quivr_core.api.modules.chat.dto.inputs import (
+    ChatMessageProperties,
+    ChatUpdatableProperties,
+    CreateChatHistory,
+    CreateChatProperties,
+    QuestionAndAnswer,
+)
+from quivr_core.api.modules.chat.dto.outputs import GetChatHistoryOutput
+from quivr_core.api.modules.chat.entity.chat import Chat, ChatHistory
+from quivr_core.api.modules.chat.repository.chats import ChatRepository
+from quivr_core.api.modules.chat.service.utils import (
+    merge_chat_history_and_notifications,
+)
+from quivr_core.api.modules.dependencies import BaseService
+from quivr_core.api.modules.notification.service.notification_service import (
+    NotificationService,
+)
+from quivr_core.api.modules.prompt.entity.prompt import Prompt
+from quivr_core.api.modules.prompt.service.prompt_service import PromptService
+
+logger = get_logger(__name__)
+
+prompt_service = PromptService()
+brain_service = BrainService()
+notification_service = NotificationService()
+
+
+class ChatService(BaseService[ChatRepository]):
+    repository_cls = ChatRepository
+
+    def __init__(self, repository: ChatRepository):
+        self.repository = repository
+
+    async def create_chat(
+        self, user_id: UUID, new_chat_data: CreateChatProperties
+    ) -> Chat:
+        # Chat is created upon the user's first question asked
+        logger.info(f"New chat entry in chats table for user {user_id}")
+
+        inserted_chat = await self.repository.create_chat(
+            Chat(chat_name=new_chat_data.name, user_id=user_id)
+        )
+        logger.info(f"Insert response {inserted_chat}")
+
+        return inserted_chat
+
+    def get_follow_up_question(
+        self, brain_id: UUID | None = None, question: str | None = None
+    ) -> List[str]:
+        follow_up = [
+            "Summarize the conversation",
+            "Explain in more detail",
+            "Explain like I'm 5",
+            "Provide a list",
+            "Give examples",
+            "Use simpler language",
+            "Elaborate on a specific point",
+            "Provide pros and cons",
+            "Break down into steps",
+            "Illustrate with an image or diagram",
+        ]
+        # Return 3 random follow-up questions from the list
+        random3 = random.sample(follow_up, 3)
+        return random3
+
+    async def add_question_and_answer(
+        self, chat_id: UUID, question_and_answer: QuestionAndAnswer
+    ) -> ChatHistory:
+        return await self.repository.add_question_and_answer(
+            chat_id, question_and_answer
+        )
+
+    async def get_chat_by_id(self, chat_id: UUID) -> Chat:
+        chat = await self.repository.get_chat_by_id(chat_id)
+        return chat
+
+    async def get_chat_history(self, chat_id: UUID) -> List[GetChatHistoryOutput]:
+        history = await self.repository.get_chat_history(chat_id)
+        enriched_history: List[GetChatHistoryOutput] = []
+        if len(history) == 0:
+            return enriched_history
+        brain: Brain = await history[0].awaitable_attrs.brain
+        prompt: Prompt = await brain.awaitable_attrs.prompt
+        for message in history:
+            enriched_history.append(
+                # TODO : WHY bother with having ids here ??
+                GetChatHistoryOutput(
+                    chat_id=(message.chat_id),
+                    message_id=message.message_id,
+                    user_message=message.user_message,
+                    assistant=message.assistant,
+                    message_time=message.message_time,
+                    brain_name=brain.name if brain else None,
+                    brain_id=brain.brain_id if brain else None,
+                    prompt_title=(prompt.title if prompt else None),
+                    metadata=message.metadata_,
+                    thumbs=message.thumbs,
+                )
+            )
+        return enriched_history
+
+    async def get_chat_history_with_notifications(
+        self,
+        chat_id: UUID,
+    ) -> List[ChatItem]:
+        chat_history = await self.get_chat_history(chat_id)
+        chat_notifications = []
+        return merge_chat_history_and_notifications(chat_history, chat_notifications)
+
+    async def get_user_chats(self, user_id: UUID) -> List[Chat]:
+        return list(await self.repository.get_user_chats(user_id))
+
+    def update_chat_history(self, chat_history: CreateChatHistory) -> ChatHistory:
+        response: List[ChatHistory] = (
+            self.repository.update_chat_history(chat_history)
+        ).data
+        if len(response) == 0:
+            raise HTTPException(
+                status_code=500,
+                detail="An exception occurred while updating chat history.",
+            )
+        return ChatHistory(**response[0])  # pyright: ignore reportPrivateUsage=none
+
+    def update_chat(self, chat_id, chat_data: ChatUpdatableProperties) -> Chat:
+        if not chat_id:
+            logger.error("No chat_id provided")
+            return  # pyright: ignore reportPrivateUsage=none
+
+        updates = {}
+
+        if chat_data.chat_name is not None:
+            updates["chat_name"] = chat_data.chat_name
+
+        updated_chat = None
+
+        if updates:
+            updated_chat = (self.repository.update_chat(chat_id, updates)).data[0]
+            logger.info(f"Chat {chat_id} updated")
+        else:
+            logger.info(f"No updates to apply for chat {chat_id}")
+        return updated_chat  # pyright: ignore reportPrivateUsage=none
+
+    def update_message_by_id(
+        self,
+        message_id: str,
+        user_message: str = None,  # pyright: ignore reportPrivateUsage=none
+        assistant: str = None,  # pyright: ignore reportPrivateUsage=none
+        metadata: dict = None,  # pyright: ignore reportPrivateUsage=none
+    ) -> ChatHistory:
+        if not message_id:
+            logger.error("No message_id provided")
+            return  # pyright: ignore reportPrivateUsage=none
+
+        updates = {}
+
+        if user_message is not None:
+            updates["user_message"] = user_message
+
+        if assistant is not None:
+            updates["assistant"] = assistant
+
+        if metadata is not None:
+            updates["metadata"] = metadata
+
+        updated_message = None
+
+        if updates:
+            updated_message = (
+                self.repository.update_message_by_id(message_id, updates)
+            ).data[  # type: ignore
+                0
+            ]
+            logger.info(f"Message {message_id} updated")
+        else:
+            logger.info(f"No updates to apply for message {message_id}")
+        return ChatHistory(**updated_message)  # pyright: ignore reportPrivateUsage=none
+
+    def delete_chat_from_db(self, chat_id):
+        try:
+            self.repository.delete_chat_history(chat_id)
+        except Exception as e:
+            logger.error(e)
+        try:
+            self.repository.delete_chat(chat_id)
+        except Exception as e:
+            logger.error(e)
+
+    def update_chat_message(
+        self, chat_id, message_id, chat_message_properties: ChatMessageProperties
+    ):
+        try:
+            return self.repository.update_chat_message(
+                chat_id, message_id, chat_message_properties
+            ).data
+        except Exception as e:
+            logger.error(e)
diff --git a/backend/core/quivr_core/api/modules/chat/service/utils.py b/backend/core/quivr_core/api/modules/chat/service/utils.py
new file mode 100644
index 000000000000..9191d64d361f
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/chat/service/utils.py
@@ -0,0 +1,44 @@
+from typing import List
+
+from
quivr_core.api.logger import get_logger +from quivr_core.api.modules.brain.service.brain_service import BrainService +from quivr_core.api.modules.chat.dto.chats import ChatItem, ChatItemType +from quivr_core.api.modules.chat.dto.outputs import GetChatHistoryOutput +from quivr_core.api.modules.notification.entity.notification import Notification +from quivr_core.api.modules.notification.service.notification_service import ( + NotificationService, +) +from quivr_core.api.modules.prompt.service.prompt_service import PromptService + +logger = get_logger(__name__) + +prompt_service = PromptService() +brain_service = BrainService() +notification_service = NotificationService() + + +def merge_chat_history_and_notifications( + chat_history: List[GetChatHistoryOutput], notifications: List[Notification] +) -> List[ChatItem]: + chat_history_and_notifications = chat_history + notifications + + chat_history_and_notifications.sort( + key=lambda x: ( + x.message_time + if isinstance(x, GetChatHistoryOutput) and x.message_time + else x.datetime + ) + ) + + transformed_data = [] + for item in chat_history_and_notifications: + if isinstance(item, GetChatHistoryOutput): + item_type = ChatItemType.MESSAGE + body = item + else: + item_type = ChatItemType.NOTIFICATION + body = item + transformed_item = ChatItem(item_type=item_type, body=body) + transformed_data.append(transformed_item) + + return transformed_data diff --git a/backend/core/quivr_core/api/modules/chat/tests/conftest.py b/backend/core/quivr_core/api/modules/chat/tests/conftest.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/chat/tests/test_chats.py b/backend/core/quivr_core/api/modules/chat/tests/test_chats.py new file mode 100644 index 000000000000..26a0039b2d9e --- /dev/null +++ b/backend/core/quivr_core/api/modules/chat/tests/test_chats.py @@ -0,0 +1,177 @@ +import asyncio +import os +from typing import List, Tuple +from uuid import uuid4 + +import pytest +import pytest_asyncio +import sqlalchemy +from sqlalchemy.ext.asyncio import create_async_engine +from sqlmodel import create_engine, select +from sqlmodel.ext.asyncio.session import AsyncSession + +from quivr_core.api.modules.brain.entity.brain_entity import Brain, BrainType +from quivr_core.api.modules.chat.dto.inputs import QuestionAndAnswer +from quivr_core.api.modules.chat.entity.chat import Chat, ChatHistory +from quivr_core.api.modules.chat.repository.chats import ChatRepository +from quivr_core.api.modules.chat.service.chat_service import ChatService +from quivr_core.api.modules.user.entity.user_identity import User + +pg_database_url = "postgres:postgres@localhost:54322/postgres" + + +@pytest.fixture(scope="session", autouse=True) +def db_setup(): + # setup + sync_engine = create_engine( + "postgresql://" + pg_database_url, + echo=True if os.getenv("ORM_DEBUG") else False, + ) + # TODO(@amine) : for now don't drop anything + yield sync_engine + # teardown + # NOTE: For now we rely on Supabase migrations for defining schemas + # SQLModel.metadata.create_all(sync_engine, checkfirst=True) + # SQLModel.metadata.drop_all(sync_engine) + + +@pytest_asyncio.fixture(scope="session") +async def async_engine(): + engine = create_async_engine( + "postgresql+asyncpg://" + pg_database_url, + echo=True if os.getenv("ORM_DEBUG") else False, + ) + yield engine + + +@pytest.fixture(scope="session") +def event_loop(request: pytest.FixtureRequest): + loop = asyncio.get_event_loop_policy().new_event_loop() + yield loop + loop.close() + + 
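+# The `session` fixture below wraps each test in an outer transaction plus a
+# nested SAVEPOINT; the "after_transaction_end" listener re-opens the savepoint
+# whenever the test commits, so writes stay visible inside the test but are
+# discarded when the connection closes, keeping tests isolated from each other.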
+@pytest_asyncio.fixture() +async def session(async_engine): + async with async_engine.connect() as conn: + await conn.begin() + await conn.begin_nested() + async_session = AsyncSession(conn, expire_on_commit=False) + + @sqlalchemy.event.listens_for( + async_session.sync_session, "after_transaction_end" + ) + def end_savepoint(session, transaction): + if conn.closed: + return + if not conn.in_nested_transaction(): + conn.sync_connection.begin_nested() + + yield async_session + + +TestData = Tuple[Brain, User, List[Chat], List[ChatHistory]] + + +@pytest_asyncio.fixture() +async def test_data( + session: AsyncSession, +) -> TestData: + # User data + user_1 = ( + await session.exec(select(User).where(User.email == "admin@quivr.app")) + ).one() + # Brain data + brain_1 = Brain(name="test_brain", description="this is a test brain") + # Chat data + chat_1 = Chat(chat_name="chat1", user=user_1) + chat_2 = Chat(chat_name="chat2", user=user_1) + + chat_history_1 = ChatHistory( + user_message="Hello", + assistant="Hello! How can I assist you today?", + chat=chat_1, + brain=brain_1, + ) + chat_history_2 = ChatHistory( + user_message="Hello", + assistant="Hello! How can I assist you today?", + chat=chat_1, + brain=brain_1, + ) + session.add(brain_1) + session.add(chat_1) + session.add(chat_2) + session.add(chat_history_1) + session.add(chat_history_2) + + await session.refresh(user_1) + await session.commit() + return brain_1, user_1, [chat_1, chat_2], [chat_history_1, chat_history_2] + + +@pytest.mark.asyncio +async def test_get_user_chats_empty(session): + repo = ChatRepository(session) + chats = await repo.get_user_chats(user_id=uuid4()) + assert len(chats) == 0 + + +@pytest.mark.asyncio +async def test_get_user_chats(session: AsyncSession, test_data: TestData): + _, local_user, chats, _ = test_data + repo = ChatRepository(session) + assert local_user.id is not None + query_chats = await repo.get_user_chats(local_user.id) + assert len(query_chats) == len(chats) + + +@pytest.mark.asyncio +async def test_get_chat_history(session: AsyncSession, test_data: TestData): + brain_1, _, chats, chat_history = test_data + assert chats[0].chat_id + assert len(chat_history) > 0 + assert chat_history[-1].message_time + assert chat_history[0].message_time + + repo = ChatRepository(session) + query_chat_history = await repo.get_chat_history(chats[0].chat_id) + assert chat_history == query_chat_history + assert query_chat_history[-1].message_time + assert query_chat_history[0].message_time + assert query_chat_history[-1].message_time >= query_chat_history[0].message_time + + # TODO: Should be tested in test_brain_repository + # Checks that brain is correct + assert query_chat_history[-1].brain is not None + assert query_chat_history[-1].brain.brain_type == BrainType.integration + + +@pytest.mark.asyncio +async def test_add_qa(session: AsyncSession, test_data: TestData): + _, _, [chat, *_], __ = test_data + assert chat.chat_id + qa = QuestionAndAnswer(question="question", answer="answer") + repo = ChatRepository(session) + resp_chat = await repo.add_question_and_answer(chat.chat_id, qa) + + assert resp_chat.chat_id == chat.chat_id + assert resp_chat.user_message == qa.question + assert resp_chat.assistant == qa.answer + + +## CHAT SERVICE + + +@pytest.mark.asyncio +async def test_service_get_chat_history(session: AsyncSession, test_data: TestData): + brain, _, [chat, *_], __ = test_data + assert chat.chat_id + repo = ChatRepository(session) + service = ChatService(repo) + history = await 
service.get_chat_history(chat.chat_id) + + assert len(history) > 0 + assert all(h.chat_id == chat.chat_id for h in history) + assert history[0].brain_name == brain.name + assert history[0].brain_id == brain.brain_id diff --git a/backend/core/quivr_core/api/modules/dependencies.py b/backend/core/quivr_core/api/modules/dependencies.py new file mode 100644 index 000000000000..c59e4533e4fc --- /dev/null +++ b/backend/core/quivr_core/api/modules/dependencies.py @@ -0,0 +1,71 @@ +import os +from typing import AsyncGenerator, Callable, Generic, Type, TypeVar +from uuid import uuid4 + +from fastapi import Depends +from sqlalchemy.ext.asyncio import create_async_engine +from sqlmodel.ext.asyncio.session import AsyncSession + +from quivr_core.api.models.settings import settings +from quivr_core.api.modules.user.entity.user_identity import UserIdentity + + +class BaseRepository: + def __init__(self, session: AsyncSession): + self.session = session + + +R = TypeVar("R", bound=BaseRepository) + + +class BaseService(Generic[R]): + # associated repository type + repository_cls: Type[R] + + def __init__(self, repository: R): + self.repository = repository + + @classmethod + def get_repository_cls(cls) -> Type[R]: + return cls.repository_cls # type: ignore + + +S = TypeVar("S", bound=BaseService) + +# TODO: env variable debug sql_alchemy +async_engine = create_async_engine( + settings.pg_database_async_url, + echo=True if os.getenv("ORM_DEBUG") else False, + future=True, +) + + +async def get_async_session() -> AsyncGenerator[AsyncSession, None]: + async with AsyncSession(async_engine) as session: + yield session + + +def get_repository(repository_model: Type[R]) -> Callable[..., R]: + def _get_repository(session: AsyncSession = Depends(get_async_session)) -> R: + return repository_model(session) + + return _get_repository + + +def get_service(service: Type[S]) -> Callable[..., S]: + def _get_service( + repository: BaseRepository = Depends( + get_repository(service.get_repository_cls()) + ), + ) -> S: + return service(repository) + + return _get_service + + +def get_current_user() -> UserIdentity: + # TODO: get it one time from db + return UserIdentity( + id=uuid4(), + email="admin@quivr.app", + ) diff --git a/backend/core/quivr_core/api/modules/knowledge/__init__.py b/backend/core/quivr_core/api/modules/knowledge/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/knowledge/controller/__init__.py b/backend/core/quivr_core/api/modules/knowledge/controller/__init__.py new file mode 100644 index 000000000000..911883cdc250 --- /dev/null +++ b/backend/core/quivr_core/api/modules/knowledge/controller/__init__.py @@ -0,0 +1 @@ +from .knowledge_routes import knowledge_router diff --git a/backend/core/quivr_core/api/modules/knowledge/controller/knowledge_routes.py b/backend/core/quivr_core/api/modules/knowledge/controller/knowledge_routes.py new file mode 100644 index 000000000000..ea04d1f9e50d --- /dev/null +++ b/backend/core/quivr_core/api/modules/knowledge/controller/knowledge_routes.py @@ -0,0 +1,102 @@ +from uuid import UUID + +from fastapi import APIRouter, Depends, HTTPException, Query + +from quivr_core.api.logger import get_logger +from quivr_core.api.middlewares.auth import AuthBearer, get_current_user +from quivr_core.api.modules.brain.entity.brain_entity import RoleEnum +from quivr_core.api.modules.brain.service.brain_authorization_service import ( + has_brain_authorization, + validate_brain_authorization, +) +from 
quivr_core.api.modules.brain.service.brain_vector_service import BrainVectorService +from quivr_core.api.modules.knowledge.service.knowledge_service import KnowledgeService +from quivr_core.api.modules.upload.service.generate_file_signed_url import ( + generate_file_signed_url, +) +from quivr_core.api.modules.user.entity.user_identity import UserIdentity + +knowledge_router = APIRouter() +logger = get_logger(__name__) + +knowledge_service = KnowledgeService() + + +@knowledge_router.get( + "/knowledge", dependencies=[Depends(AuthBearer())], tags=["Knowledge"] +) +async def list_knowledge_in_brain_endpoint( + brain_id: UUID = Query(..., description="The ID of the brain"), + current_user: UserIdentity = Depends(get_current_user), +): + """ + Retrieve and list all the knowledge in a brain. + """ + + validate_brain_authorization(brain_id=brain_id, user_id=current_user.id) + + knowledges = knowledge_service.get_all_knowledge(brain_id) + + return {"knowledges": knowledges} + + +@knowledge_router.delete( + "/knowledge/{knowledge_id}", + dependencies=[ + Depends(AuthBearer()), + Depends(has_brain_authorization(RoleEnum.Owner)), + ], + tags=["Knowledge"], +) +async def delete_endpoint( + knowledge_id: UUID, + current_user: UserIdentity = Depends(get_current_user), + brain_id: UUID = Query(..., description="The ID of the brain"), +): + """ + Delete a specific knowledge from a brain. + """ + + knowledge = knowledge_service.get_knowledge(knowledge_id) + file_name = knowledge.file_name if knowledge.file_name else knowledge.url + knowledge_service.remove_knowledge(knowledge_id) + + brain_vector_service = BrainVectorService(brain_id) + if knowledge.file_name: + brain_vector_service.delete_file_from_brain(knowledge.file_name) + elif knowledge.url: + brain_vector_service.delete_file_url_from_brain(knowledge.url) + + return { + "message": f"{file_name} of brain {brain_id} has been deleted by user {current_user.email}." + } + + +@knowledge_router.get( + "/knowledge/{knowledge_id}/signed_download_url", + dependencies=[Depends(AuthBearer())], + tags=["Knowledge"], +) +async def generate_signed_url_endpoint( + knowledge_id: UUID, + current_user: UserIdentity = Depends(get_current_user), +): + """ + Generate a signed url to download the file from storage. 
+ """ + + knowledge = knowledge_service.get_knowledge(knowledge_id) + + validate_brain_authorization(brain_id=knowledge.brain_id, user_id=current_user.id) + + if knowledge.file_name == None: + raise HTTPException( + status_code=404, + detail=f"Knowledge with id {knowledge_id} is not a file.", + ) + + file_path_in_storage = f"{knowledge.brain_id}/{knowledge.file_name}" + + file_signed_url = generate_file_signed_url(file_path_in_storage) + + return file_signed_url diff --git a/backend/core/quivr_core/api/modules/knowledge/dto/__init__.py b/backend/core/quivr_core/api/modules/knowledge/dto/__init__.py new file mode 100644 index 000000000000..4f3a4b9f7bb5 --- /dev/null +++ b/backend/core/quivr_core/api/modules/knowledge/dto/__init__.py @@ -0,0 +1,2 @@ +from .inputs import CreateKnowledgeProperties +from .outputs import DeleteKnowledgeResponse diff --git a/backend/core/quivr_core/api/modules/knowledge/dto/inputs.py b/backend/core/quivr_core/api/modules/knowledge/dto/inputs.py new file mode 100644 index 000000000000..b6d008c35f24 --- /dev/null +++ b/backend/core/quivr_core/api/modules/knowledge/dto/inputs.py @@ -0,0 +1,18 @@ +from typing import Optional +from uuid import UUID + +from pydantic import BaseModel + + +class CreateKnowledgeProperties(BaseModel): + brain_id: UUID + file_name: Optional[str] = None + url: Optional[str] = None + extension: str = "txt" + integration: Optional[str] = None + integration_link: Optional[str] = None + + def dict(self, *args, **kwargs): + knowledge_dict = super().dict(*args, **kwargs) + knowledge_dict["brain_id"] = str(knowledge_dict.get("brain_id")) + return knowledge_dict diff --git a/backend/core/quivr_core/api/modules/knowledge/dto/outputs.py b/backend/core/quivr_core/api/modules/knowledge/dto/outputs.py new file mode 100644 index 000000000000..905da7cf0d7f --- /dev/null +++ b/backend/core/quivr_core/api/modules/knowledge/dto/outputs.py @@ -0,0 +1,8 @@ +from uuid import UUID + +from pydantic import BaseModel + + +class DeleteKnowledgeResponse(BaseModel): + status: str = "delete" + knowledge_id: UUID diff --git a/backend/core/quivr_core/api/modules/knowledge/entity/__init__.py b/backend/core/quivr_core/api/modules/knowledge/entity/__init__.py new file mode 100644 index 000000000000..6f7620c446d2 --- /dev/null +++ b/backend/core/quivr_core/api/modules/knowledge/entity/__init__.py @@ -0,0 +1 @@ +from .knowledge import Knowledge diff --git a/backend/core/quivr_core/api/modules/knowledge/entity/knowledge.py b/backend/core/quivr_core/api/modules/knowledge/entity/knowledge.py new file mode 100644 index 000000000000..504162d0c644 --- /dev/null +++ b/backend/core/quivr_core/api/modules/knowledge/entity/knowledge.py @@ -0,0 +1,12 @@ +from typing import Optional +from uuid import UUID + +from pydantic import BaseModel + + +class Knowledge(BaseModel): + id: UUID + brain_id: UUID + file_name: Optional[str] = None + url: Optional[str] = None + extension: str = "txt" diff --git a/backend/core/quivr_core/api/modules/knowledge/repository/__init__.py b/backend/core/quivr_core/api/modules/knowledge/repository/__init__.py new file mode 100644 index 000000000000..80f13aa5f77f --- /dev/null +++ b/backend/core/quivr_core/api/modules/knowledge/repository/__init__.py @@ -0,0 +1 @@ +from .knowledges import KnowledgeRepository diff --git a/backend/core/quivr_core/api/modules/knowledge/repository/knowledge_interface.py b/backend/core/quivr_core/api/modules/knowledge/repository/knowledge_interface.py new file mode 100644 index 000000000000..7d36e3b02bcc --- /dev/null +++ 
diff --git a/backend/core/quivr_core/api/modules/knowledge/dto/outputs.py b/backend/core/quivr_core/api/modules/knowledge/dto/outputs.py
new file mode 100644
index 000000000000..905da7cf0d7f
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/knowledge/dto/outputs.py
@@ -0,0 +1,8 @@
+from uuid import UUID
+
+from pydantic import BaseModel
+
+
+class DeleteKnowledgeResponse(BaseModel):
+    status: str = "deleted"
+    knowledge_id: UUID
diff --git a/backend/core/quivr_core/api/modules/knowledge/entity/__init__.py b/backend/core/quivr_core/api/modules/knowledge/entity/__init__.py
new file mode 100644
index 000000000000..6f7620c446d2
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/knowledge/entity/__init__.py
@@ -0,0 +1 @@
+from .knowledge import Knowledge
diff --git a/backend/core/quivr_core/api/modules/knowledge/entity/knowledge.py b/backend/core/quivr_core/api/modules/knowledge/entity/knowledge.py
new file mode 100644
index 000000000000..504162d0c644
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/knowledge/entity/knowledge.py
@@ -0,0 +1,12 @@
+from typing import Optional
+from uuid import UUID
+
+from pydantic import BaseModel
+
+
+class Knowledge(BaseModel):
+    id: UUID
+    brain_id: UUID
+    file_name: Optional[str] = None
+    url: Optional[str] = None
+    extension: str = "txt"
diff --git a/backend/core/quivr_core/api/modules/knowledge/repository/__init__.py b/backend/core/quivr_core/api/modules/knowledge/repository/__init__.py
new file mode 100644
index 000000000000..80f13aa5f77f
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/knowledge/repository/__init__.py
@@ -0,0 +1 @@
+from .knowledges import KnowledgeRepository
diff --git a/backend/core/quivr_core/api/modules/knowledge/repository/knowledge_interface.py b/backend/core/quivr_core/api/modules/knowledge/repository/knowledge_interface.py
new file mode 100644
index 000000000000..7d36e3b02bcc
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/knowledge/repository/knowledge_interface.py
@@ -0,0 +1,58 @@
+from abc import ABC, abstractmethod
+from typing import List
+from uuid import UUID
+
+from quivr_core.api.modules.knowledge.dto.inputs import CreateKnowledgeProperties
+from quivr_core.api.modules.knowledge.dto.outputs import DeleteKnowledgeResponse
+from quivr_core.api.modules.knowledge.entity.knowledge import Knowledge
+
+
+class KnowledgeInterface(ABC):
+    @abstractmethod
+    def insert_knowledge(self, knowledge: CreateKnowledgeProperties) -> Knowledge:
+        """
+        Add a knowledge
+        """
+        pass
+
+    @abstractmethod
+    def remove_knowledge_by_id(
+        # todo: update remove brain endpoints to first delete the knowledge
+        self,
+        knowledge_id: UUID,
+    ) -> DeleteKnowledgeResponse:
+        """
+        Args:
+            knowledge_id (UUID): The id of the knowledge
+
+        Returns:
+            DeleteKnowledgeResponse: status and id of the deleted knowledge
+        """
+        pass
+
+    @abstractmethod
+    def get_knowledge_by_id(self, knowledge_id: UUID) -> Knowledge:
+        """
+        Get a knowledge by its id
+        Args:
+            knowledge_id (UUID): The id of the knowledge
+        """
+        pass
+
+    @abstractmethod
+    def get_all_knowledge_in_brain(self, brain_id: UUID) -> List[Knowledge]:
+        """
+        Get all the knowledge in a brain
+        Args:
+            brain_id (UUID): The id of the brain
+        """
+        pass
+
+    @abstractmethod
+    def remove_brain_all_knowledge(self, brain_id: UUID) -> None:
+        """
+        Remove all knowledge in a brain
+        Args:
+            brain_id (UUID): The id of the brain
+        """
+        pass
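`KnowledgeInterface` is the seam the Supabase-backed repository in the next diff plugs into. As a sketch (not part of the patch), a minimal in-memory implementation is enough to unit-test callers without a database; it assumes only the DTOs and entity defined above:

```python
from typing import Dict, List
from uuid import UUID, uuid4


class FakeKnowledgeRepository(KnowledgeInterface):
    """Hypothetical in-memory stand-in for the Supabase repository, for tests."""

    def __init__(self):
        self._rows: Dict[UUID, Knowledge] = {}

    def insert_knowledge(self, knowledge: CreateKnowledgeProperties) -> Knowledge:
        row = Knowledge(
            id=uuid4(),
            brain_id=knowledge.brain_id,
            file_name=knowledge.file_name,
            url=knowledge.url,
            extension=knowledge.extension,
        )
        self._rows[row.id] = row
        return row

    def remove_knowledge_by_id(self, knowledge_id: UUID) -> DeleteKnowledgeResponse:
        self._rows.pop(knowledge_id)
        return DeleteKnowledgeResponse(status="deleted", knowledge_id=knowledge_id)

    def get_knowledge_by_id(self, knowledge_id: UUID) -> Knowledge:
        return self._rows[knowledge_id]

    def get_all_knowledge_in_brain(self, brain_id: UUID) -> List[Knowledge]:
        return [k for k in self._rows.values() if k.brain_id == brain_id]

    def remove_brain_all_knowledge(self, brain_id: UUID) -> None:
        for k in self.get_all_knowledge_in_brain(brain_id):
            del self._rows[k.id]
```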
self.db.from_("knowledge") + .select("*") + .filter("id", "eq", str(knowledge_id)) + .execute() + ).data + + return Knowledge(**knowledge[0]) + + def get_all_knowledge_in_brain(self, brain_id: UUID) -> list[Knowledge]: + """ + Get all the knowledge in a brain + Args: + brain_id (UUID): The id of the brain + """ + all_knowledge = ( + self.db.from_("knowledge") + .select("*") + .filter("brain_id", "eq", str(brain_id)) + .execute() + ).data + + return [Knowledge(**knowledge) for knowledge in all_knowledge] + + def remove_brain_all_knowledge(self, brain_id): + """ + Remove all knowledge in a brain + Args: + brain_id (UUID): The id of the brain + """ + all_knowledge = self.get_all_knowledge_in_brain(brain_id) + knowledge_to_delete_list = [] + + for knowledge in all_knowledge: + if knowledge.file_name: + knowledge_to_delete_list.append(f"{brain_id}/{knowledge.file_name}") + + if knowledge_to_delete_list: + self.db.storage.from_("quivr").remove(knowledge_to_delete_list) + + self.db.from_("knowledge").delete().filter( + "brain_id", "eq", str(brain_id) + ).execute() diff --git a/backend/core/quivr_core/api/modules/knowledge/repository/storage.py b/backend/core/quivr_core/api/modules/knowledge/repository/storage.py new file mode 100644 index 000000000000..b98b041a1753 --- /dev/null +++ b/backend/core/quivr_core/api/modules/knowledge/repository/storage.py @@ -0,0 +1,30 @@ +from quivr_core.api.logger import get_logger +from quivr_core.api.models.settings import get_supabase_client +from quivr_core.api.modules.knowledge.repository.storage_interface import ( + StorageInterface, +) + +logger = get_logger(__name__) + + +class Storage(StorageInterface): + def __init__(self): + supabase_client = get_supabase_client() + self.db = supabase_client + + def upload_file(self, file_name: str): + """ + Upload file to storage + """ + self.db.storage.from_("quivr").download(file_name) + + def remove_file(self, file_name: str): + """ + Remove file from storage + """ + try: + response = self.db.storage.from_("quivr").remove([file_name]) + return response + except Exception as e: + logger.error(e) + # raise e diff --git a/backend/core/quivr_core/api/modules/knowledge/repository/storage_interface.py b/backend/core/quivr_core/api/modules/knowledge/repository/storage_interface.py new file mode 100644 index 000000000000..228c998276e3 --- /dev/null +++ b/backend/core/quivr_core/api/modules/knowledge/repository/storage_interface.py @@ -0,0 +1,10 @@ +from abc import ABC, abstractmethod + + +class StorageInterface(ABC): + @abstractmethod + def remove_file(self, file_name: str): + """ + Remove file from storage + """ + pass diff --git a/backend/core/quivr_core/api/modules/knowledge/service/__init__.py b/backend/core/quivr_core/api/modules/knowledge/service/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/knowledge/service/knowledge_service.py b/backend/core/quivr_core/api/modules/knowledge/service/knowledge_service.py new file mode 100644 index 000000000000..6798f6f24a85 --- /dev/null +++ b/backend/core/quivr_core/api/modules/knowledge/service/knowledge_service.py @@ -0,0 +1,45 @@ +from uuid import UUID + +from quivr_core.api.logger import get_logger +from quivr_core.api.modules.knowledge.dto.inputs import CreateKnowledgeProperties +from quivr_core.api.modules.knowledge.entity.knowledge import Knowledge +from quivr_core.api.modules.knowledge.repository.knowledge_interface import ( + KnowledgeInterface, +) +from 
diff --git a/backend/core/quivr_core/api/modules/knowledge/service/knowledge_service.py b/backend/core/quivr_core/api/modules/knowledge/service/knowledge_service.py
new file mode 100644
index 000000000000..6798f6f24a85
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/knowledge/service/knowledge_service.py
@@ -0,0 +1,45 @@
+from uuid import UUID
+
+from quivr_core.api.logger import get_logger
+from quivr_core.api.modules.knowledge.dto.inputs import CreateKnowledgeProperties
+from quivr_core.api.modules.knowledge.entity.knowledge import Knowledge
+from quivr_core.api.modules.knowledge.repository.knowledge_interface import (
+    KnowledgeInterface,
+)
+from quivr_core.api.modules.knowledge.repository.knowledges import KnowledgeRepository
+
+logger = get_logger(__name__)
+
+
+class KnowledgeService:
+    repository: KnowledgeInterface
+
+    def __init__(self):
+        self.repository = KnowledgeRepository()
+
+    def add_knowledge(self, knowledge_to_add: CreateKnowledgeProperties):
+        knowledge = self.repository.insert_knowledge(knowledge_to_add)
+
+        return knowledge
+
+    def get_all_knowledge(self, brain_id: UUID):
+        knowledges = self.repository.get_all_knowledge_in_brain(brain_id)
+
+        return knowledges
+
+    def get_knowledge(self, knowledge_id: UUID) -> Knowledge:
+        knowledge = self.repository.get_knowledge_by_id(knowledge_id)
+
+        return knowledge
+
+    def remove_brain_all_knowledge(self, brain_id: UUID) -> None:
+        self.repository.remove_brain_all_knowledge(brain_id)
+
+        logger.info(
+            f"All knowledge in brain {brain_id} removed successfully from table"
+        )
+
+    def remove_knowledge(self, knowledge_id: UUID):
+        message = self.repository.remove_knowledge_by_id(knowledge_id)
+
+        return message
diff --git a/backend/core/quivr_core/api/modules/prompt/__init__.py b/backend/core/quivr_core/api/modules/prompt/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/backend/core/quivr_core/api/modules/prompt/controller/__init__.py b/backend/core/quivr_core/api/modules/prompt/controller/__init__.py
new file mode 100644
index 000000000000..225300b6f7bc
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/prompt/controller/__init__.py
@@ -0,0 +1 @@
+from .prompt_routes import prompt_router
diff --git a/backend/core/quivr_core/api/modules/prompt/controller/prompt_routes.py b/backend/core/quivr_core/api/modules/prompt/controller/prompt_routes.py
new file mode 100644
index 000000000000..e51c5b540927
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/prompt/controller/prompt_routes.py
@@ -0,0 +1,56 @@
+from uuid import UUID
+
+from fastapi import APIRouter, Depends
+
+from quivr_core.api.middlewares.auth import AuthBearer
+from quivr_core.api.modules.prompt.entity.prompt import (
+    CreatePromptProperties,
+    Prompt,
+    PromptUpdatableProperties,
+)
+from quivr_core.api.modules.prompt.service import PromptService
+
+prompt_router = APIRouter()
+
+promptService = PromptService()
+
+
+@prompt_router.get("/prompts", dependencies=[Depends(AuthBearer())], tags=["Prompt"])
+async def get_prompts() -> list[Prompt]:
+    """
+    Retrieve all public prompts
+    """
+    return promptService.get_public_prompts()
+
+
+@prompt_router.get(
+    "/prompts/{prompt_id}", dependencies=[Depends(AuthBearer())], tags=["Prompt"]
+)
+async def get_prompt(prompt_id: UUID) -> Prompt | None:
+    """
+    Retrieve a prompt by its id
+    """
+
+    return promptService.get_prompt_by_id(prompt_id)
+
+
+@prompt_router.put(
+    "/prompts/{prompt_id}", dependencies=[Depends(AuthBearer())], tags=["Prompt"]
+)
+async def update_prompt(
+    prompt_id: UUID, prompt: PromptUpdatableProperties
+) -> Prompt | None:
+    """
+    Update a prompt by its id
+    """
+
+    return promptService.update_prompt_by_id(prompt_id, prompt)
+
+
+@prompt_router.post("/prompts", dependencies=[Depends(AuthBearer())], tags=["Prompt"])
+async def create_prompt_route(prompt: CreatePromptProperties) -> Prompt | None:
+    """
+    Create a new prompt
+    """
+
+    return promptService.create_prompt(prompt)
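A hypothetical round-trip through the prompt routes above, using FastAPI's `TestClient`; not part of the patch. It assumes `AuthBearer` accepts the placeholder token (in practice you would override that dependency or supply a real JWT):

```python
from fastapi import FastAPI
from fastapi.testclient import TestClient

app = FastAPI()
app.include_router(prompt_router)

client = TestClient(app)
headers = {"Authorization": "Bearer <token>"}  # placeholder

created = client.post(
    "/prompts",
    json={"title": "Summarizer", "content": "Summarize the document.", "status": "private"},
    headers=headers,
).json()

fetched = client.get(f"/prompts/{created['id']}", headers=headers).json()
assert fetched["title"] == "Summarizer"
```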
diff --git a/backend/core/quivr_core/api/modules/prompt/entity/__init__.py b/backend/core/quivr_core/api/modules/prompt/entity/__init__.py
new file mode 100644
index 000000000000..324aeee09ffc
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/prompt/entity/__init__.py
@@ -0,0 +1,7 @@
+from .prompt import (
+    CreatePromptProperties,
+    DeletePromptResponse,
+    Prompt,
+    PromptStatusEnum,
+    PromptUpdatableProperties,
+)
diff --git a/backend/core/quivr_core/api/modules/prompt/entity/prompt.py b/backend/core/quivr_core/api/modules/prompt/entity/prompt.py
new file mode 100644
index 000000000000..2e91ee7bd4ce
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/prompt/entity/prompt.py
@@ -0,0 +1,53 @@
+from enum import Enum
+from typing import List, Optional
+from uuid import UUID
+
+from pydantic import BaseModel
+from sqlmodel import UUID as PGUUID
+from sqlmodel import Column, Field, Relationship, SQLModel, text
+
+
+class PromptStatusEnum(str, Enum):
+    private = "private"
+    public = "public"
+
+
+class Prompt(SQLModel, table=True):
+    __tablename__ = "prompts"  # type: ignore
+    id: UUID | None = Field(
+        default=None,
+        sa_column=Column(
+            PGUUID,
+            server_default=text("uuid_generate_v4()"),
+            primary_key=True,
+        ),
+    )
+    content: str | None = None
+    title: str | None = Field(default=None, max_length=255)
+    status: str = Field(default="private", max_length=255)
+    brain: List["Brain"] = Relationship(  # noqa: F821
+        back_populates="prompt", sa_relationship_kwargs={"lazy": "joined"}
+    )
+
+
+class CreatePromptProperties(BaseModel):
+    """Properties that can be received on prompt creation"""
+
+    title: str
+    content: str
+    status: PromptStatusEnum = PromptStatusEnum.private
+
+
+class PromptUpdatableProperties(BaseModel):
+    """Properties that can be received on prompt update"""
+
+    title: Optional[str] = None
+    content: Optional[str] = None
+    status: Optional[PromptStatusEnum] = None
+
+
+class DeletePromptResponse(BaseModel):
+    """Response when deleting a prompt"""
+
+    status: str = "deleted"
+    prompt_id: UUID
diff --git a/backend/core/quivr_core/api/modules/prompt/repository/__init__.py b/backend/core/quivr_core/api/modules/prompt/repository/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/backend/core/quivr_core/api/modules/prompt/repository/prompts.py b/backend/core/quivr_core/api/modules/prompt/repository/prompts.py
new file mode 100644
index 000000000000..6cf416b170cb
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/prompt/repository/prompts.py
@@ -0,0 +1,103 @@
+from fastapi import HTTPException
+
+from quivr_core.api.models.settings import get_supabase_client
+from quivr_core.api.modules.prompt.entity.prompt import Prompt
+from quivr_core.api.modules.prompt.repository.prompts_interface import (
+    DeletePromptResponse,
+    PromptsInterface,
+)
+
+
+class Prompts(PromptsInterface):
+    def __init__(self):
+        supabase_client = get_supabase_client()
+        self.db = supabase_client
+
+    def create_prompt(self, prompt):
+        """
+        Create a prompt
+        """
+
+        response = (self.db.from_("prompts").insert(prompt.dict()).execute()).data
+
+        return Prompt(**response[0])
+
+    def delete_prompt_by_id(self, prompt_id):
+        """
+        Delete a prompt by id
+        Args:
+            prompt_id (UUID): The id of the prompt
+
+        Returns:
+            A dictionary containing the status of the delete and prompt_id of the deleted prompt
+        """
+
+        # Update brains where prompt_id is equal to the value to NULL
+        self.db.from_("brains").update({"prompt_id": None}).filter(
+            "prompt_id", "eq", prompt_id
+        ).execute()
+
+        # Update chat_history where prompt_id is equal to the value to NULL
+        self.db.from_("chat_history").update({"prompt_id": None}).filter(
+            "prompt_id", "eq", prompt_id
+        ).execute()
+
+        # Delete the prompt
+        response
= ( + self.db.from_("prompts") + .delete() + .filter("id", "eq", prompt_id) + .execute() + .data + ) + + if response == []: + raise HTTPException(404, "Prompt not found") + + return DeletePromptResponse(status="deleted", prompt_id=prompt_id) + + def get_prompt_by_id(self, prompt_id): + """ + Get a prompt by its id + + Args: + prompt_id (UUID): The id of the prompt + + Returns: + Prompt: The prompt + """ + + response = ( + self.db.from_("prompts").select("*").filter("id", "eq", prompt_id).execute() + ).data + + if response == []: + return None + return Prompt(**response[0]) + + def get_public_prompts(self): + """ + List all public prompts + """ + + return ( + self.db.from_("prompts") + .select("*") + .filter("status", "eq", "public") + .execute() + ).data + + def update_prompt_by_id(self, prompt_id, prompt): + """Update a prompt by id""" + + response = ( + self.db.from_("prompts") + .update(prompt.dict(exclude_unset=True)) + .filter("id", "eq", prompt_id) + .execute() + ).data + + if response == []: + raise HTTPException(404, "Prompt not found") + + return Prompt(**response[0]) diff --git a/backend/core/quivr_core/api/modules/prompt/repository/prompts_interface.py b/backend/core/quivr_core/api/modules/prompt/repository/prompts_interface.py new file mode 100644 index 000000000000..9a838cc3a515 --- /dev/null +++ b/backend/core/quivr_core/api/modules/prompt/repository/prompts_interface.py @@ -0,0 +1,57 @@ +from abc import ABC, abstractmethod +from uuid import UUID + +from quivr_core.api.modules.prompt.entity import ( + CreatePromptProperties, + DeletePromptResponse, + Prompt, + PromptUpdatableProperties, +) + + +class PromptsInterface(ABC): + @abstractmethod + def create_prompt(self, prompt: CreatePromptProperties) -> Prompt: + """ + Create a prompt + """ + pass + + @abstractmethod + def delete_prompt_by_id(self, prompt_id: UUID) -> DeletePromptResponse: + """ + Delete a prompt by id + Args: + prompt_id (UUID): The id of the prompt + + Returns: + A dictionary containing the status of the delete and prompt_id of the deleted prompt + """ + pass + + @abstractmethod + def get_prompt_by_id(self, prompt_id: UUID) -> Prompt | None: + """ + Get a prompt by its id + + Args: + prompt_id (UUID): The id of the prompt + + Returns: + Prompt: The prompt + """ + pass + + @abstractmethod + def get_public_prompts(self) -> list[Prompt]: + """ + List all public prompts + """ + pass + + @abstractmethod + def update_prompt_by_id( + self, prompt_id: UUID, prompt: PromptUpdatableProperties + ) -> Prompt: + """Update a prompt by id""" + pass diff --git a/backend/core/quivr_core/api/modules/prompt/service/__init__.py b/backend/core/quivr_core/api/modules/prompt/service/__init__.py new file mode 100644 index 000000000000..04be1eb3c873 --- /dev/null +++ b/backend/core/quivr_core/api/modules/prompt/service/__init__.py @@ -0,0 +1 @@ +from .prompt_service import PromptService diff --git a/backend/core/quivr_core/api/modules/prompt/service/get_prompt_to_use.py b/backend/core/quivr_core/api/modules/prompt/service/get_prompt_to_use.py new file mode 100644 index 000000000000..f96594d41452 --- /dev/null +++ b/backend/core/quivr_core/api/modules/prompt/service/get_prompt_to_use.py @@ -0,0 +1,17 @@ +from typing import Optional +from uuid import UUID + +from quivr_core.api.modules.brain.service.utils.get_prompt_to_use_id import ( + get_prompt_to_use_id, +) +from quivr_core.api.modules.prompt.service import PromptService + +promptService = PromptService() + + +def get_prompt_to_use(brain_id: Optional[UUID], prompt_id: 
Optional[UUID]):
+    prompt_to_use_id = get_prompt_to_use_id(brain_id, prompt_id)
+    if prompt_to_use_id is None:
+        return None
+
+    return promptService.get_prompt_by_id(prompt_to_use_id)
diff --git a/backend/core/quivr_core/api/modules/prompt/service/prompt_service.py b/backend/core/quivr_core/api/modules/prompt/service/prompt_service.py
new file mode 100644
index 000000000000..f1659303f14c
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/prompt/service/prompt_service.py
@@ -0,0 +1,57 @@
+from typing import List
+from uuid import UUID
+
+from quivr_core.api.modules.prompt.entity.prompt import (
+    CreatePromptProperties,
+    DeletePromptResponse,
+    Prompt,
+    PromptUpdatableProperties,
+)
+from quivr_core.api.modules.prompt.repository.prompts import Prompts
+
+
+class PromptService:
+    repository: Prompts
+
+    def __init__(self):
+        self.repository = Prompts()
+
+    def create_prompt(self, prompt: CreatePromptProperties) -> Prompt:
+        return self.repository.create_prompt(prompt)
+
+    def delete_prompt_by_id(self, prompt_id: UUID) -> DeletePromptResponse:
+        """
+        Delete a prompt by id
+        Args:
+            prompt_id (UUID): The id of the prompt
+
+        Returns:
+            DeletePromptResponse: status and id of the deleted prompt
+        """
+        return self.repository.delete_prompt_by_id(prompt_id)
+
+    def get_prompt_by_id(self, prompt_id: UUID) -> Prompt | None:
+        """
+        Get a prompt by its id
+
+        Args:
+            prompt_id (UUID): The id of the prompt
+
+        Returns:
+            Prompt: The prompt
+        """
+        return self.repository.get_prompt_by_id(prompt_id)
+
+    def get_public_prompts(self) -> List[Prompt]:
+        """
+        List all public prompts
+        """
+
+        return self.repository.get_public_prompts()
+
+    def update_prompt_by_id(
+        self, prompt_id: UUID, prompt: PromptUpdatableProperties
+    ) -> Prompt:
+        """Update a prompt by id"""
+
+        return self.repository.update_prompt_by_id(prompt_id, prompt)
diff --git a/backend/core/quivr_core/api/modules/prompt/tests/test_prompt.py b/backend/core/quivr_core/api/modules/prompt/tests/test_prompt.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/backend/core/quivr_core/api/modules/upload/__init__.py b/backend/core/quivr_core/api/modules/upload/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/backend/core/quivr_core/api/modules/upload/controller/__init__.py b/backend/core/quivr_core/api/modules/upload/controller/__init__.py
new file mode 100644
index 000000000000..56030aa683eb
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/upload/controller/__init__.py
@@ -0,0 +1 @@
+from .upload_routes import upload_router
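Before the upload route itself, a hypothetical client-side call (not part of the patch) showing the shape of the request it expects: the file travels in a multipart field named `uploadFile`, the target brain in a `brain_id` query parameter. The host and token are assumptions:

```python
import httpx

BRAIN_ID = "00000000-0000-0000-0000-000000000000"  # an existing brain's UUID

with open("report.pdf", "rb") as f:
    resp = httpx.post(
        "http://localhost:8000/upload",
        params={"brain_id": BRAIN_ID},
        files={"uploadFile": ("report.pdf", f, "application/pdf")},
        headers={"Authorization": "Bearer <token>"},
    )

resp.raise_for_status()
print(resp.json())  # {"message": "File processing has started."}
```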
diff --git a/backend/core/quivr_core/api/modules/upload/controller/upload_routes.py b/backend/core/quivr_core/api/modules/upload/controller/upload_routes.py
new file mode 100644
index 000000000000..a8da251b4daa
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/upload/controller/upload_routes.py
@@ -0,0 +1,120 @@
+import os
+from typing import Optional
+from uuid import UUID
+
+from fastapi import APIRouter, Depends, HTTPException, Query, UploadFile
+
+from quivr_core.api.celery_worker import process_file_and_notify
+from quivr_core.api.logger import get_logger
+from quivr_core.api.middlewares.auth import AuthBearer, get_current_user
+from quivr_core.api.modules.brain.entity.brain_entity import RoleEnum
+from quivr_core.api.modules.brain.service.brain_authorization_service import (
+    validate_brain_authorization,
+)
+from quivr_core.api.modules.knowledge.dto.inputs import CreateKnowledgeProperties
+from quivr_core.api.modules.knowledge.service.knowledge_service import KnowledgeService
+from quivr_core.api.modules.notification.dto.inputs import (
+    CreateNotification,
+    NotificationUpdatableProperties,
+)
+from quivr_core.api.modules.notification.entity.notification import (
+    NotificationsStatusEnum,
+)
+from quivr_core.api.modules.notification.service.notification_service import (
+    NotificationService,
+)
+from quivr_core.api.modules.upload.service.upload_file import upload_file_storage
+from quivr_core.api.modules.user.entity.user_identity import UserIdentity
+from quivr_core.api.modules.user.service.user_usage import UserUsage
+from quivr_core.api.packages.files.file import convert_bytes, get_file_size
+from quivr_core.api.packages.utils.telemetry import maybe_send_telemetry
+
+logger = get_logger(__name__)
+upload_router = APIRouter()
+
+notification_service = NotificationService()
+knowledge_service = KnowledgeService()
+
+
+@upload_router.get("/upload/healthz", tags=["Health"])
+async def healthz():
+    return {"status": "ok"}
+
+
+@upload_router.post("/upload", dependencies=[Depends(AuthBearer())], tags=["Upload"])
+async def upload_file(
+    uploadFile: UploadFile,
+    brain_id: UUID = Query(..., description="The ID of the brain"),
+    chat_id: Optional[UUID] = Query(None, description="The ID of the chat"),
+    current_user: UserIdentity = Depends(get_current_user),
+):
+    validate_brain_authorization(
+        brain_id, current_user.id, [RoleEnum.Editor, RoleEnum.Owner]
+    )
+    uploadFile.file.seek(0)
+    user_daily_usage = UserUsage(
+        id=current_user.id,
+        email=current_user.email,
+    )
+
+    upload_notification = notification_service.add_notification(
+        CreateNotification(
+            user_id=current_user.id,
+            status=NotificationsStatusEnum.INFO,
+            title=f"Processing File {uploadFile.filename}",
+        )
+    )
+
+    user_settings = user_daily_usage.get_user_settings()
+
+    remaining_free_space = user_settings.get("max_brain_size", 1000000000)
+    maybe_send_telemetry("upload_file", {"file_name": uploadFile.filename})
+    file_size = get_file_size(uploadFile)
+    if remaining_free_space - file_size < 0:
+        message = f"Brain will exceed maximum capacity. Maximum file allowed is: {convert_bytes(remaining_free_space)}"
+        raise HTTPException(status_code=403, detail=message)
+
+    file_content = await uploadFile.read()
+
+    filename_with_brain_id = str(brain_id) + "/" + str(uploadFile.filename)
+
+    try:
+        upload_file_storage(file_content, filename_with_brain_id)
+
+    except Exception as e:
+        logger.error(e)
+
+        notification_service.update_notification_by_id(
+            upload_notification.id if upload_notification else None,
+            NotificationUpdatableProperties(
+                status=NotificationsStatusEnum.ERROR,
+                description=f"There was an error uploading the file: {e}",
+            ),
+        )
+        if "The resource already exists" in str(e):
+            raise HTTPException(
+                status_code=403,
+                detail=f"File {uploadFile.filename} already exists in storage.",
+            )
+        else:
+            raise HTTPException(
+                status_code=500, detail=f"Failed to upload file to storage.
{e}" + ) + + knowledge_to_add = CreateKnowledgeProperties( + brain_id=brain_id, + file_name=uploadFile.filename, + extension=os.path.splitext( + uploadFile.filename # pyright: ignore reportPrivateUsage=none + )[-1].lower(), + ) + + knowledge_service.add_knowledge(knowledge_to_add) + + process_file_and_notify.delay( + file_name=filename_with_brain_id, + file_original_name=uploadFile.filename, + brain_id=brain_id, + notification_id=upload_notification.id, + ) + return {"message": "File processing has started."} diff --git a/backend/core/quivr_core/api/modules/upload/service/__init__.py b/backend/core/quivr_core/api/modules/upload/service/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/upload/service/generate_file_signed_url.py b/backend/core/quivr_core/api/modules/upload/service/generate_file_signed_url.py new file mode 100644 index 000000000000..aaaf4b727473 --- /dev/null +++ b/backend/core/quivr_core/api/modules/upload/service/generate_file_signed_url.py @@ -0,0 +1,27 @@ +from multiprocessing import get_logger + +from supabase.client import Client + +from quivr_core.api.models.settings import get_supabase_client + +logger = get_logger() + +SIGNED_URL_EXPIRATION_PERIOD_IN_SECONDS = 3600 + + +def generate_file_signed_url(path): + supabase_client: Client = get_supabase_client() + + try: + response = supabase_client.storage.from_("quivr").create_signed_url( + path, + SIGNED_URL_EXPIRATION_PERIOD_IN_SECONDS, + options={ + "download": True, + "transform": None, + }, + ) + logger.info("RESPONSE SIGNED URL", response) + return response + except Exception as e: + logger.error(e) diff --git a/backend/core/quivr_core/api/modules/upload/service/list_files.py b/backend/core/quivr_core/api/modules/upload/service/list_files.py new file mode 100644 index 000000000000..1a9c3faf38ce --- /dev/null +++ b/backend/core/quivr_core/api/modules/upload/service/list_files.py @@ -0,0 +1,17 @@ +from multiprocessing import get_logger + +from supabase.client import Client + +from quivr_core.api.models.settings import get_supabase_client + +logger = get_logger() + + +def list_files_from_storage(path): + supabase_client: Client = get_supabase_client() + + try: + response = supabase_client.storage.from_("quivr").list(path) + return response + except Exception as e: + logger.error(e) diff --git a/backend/core/quivr_core/api/modules/upload/service/upload_file.py b/backend/core/quivr_core/api/modules/upload/service/upload_file.py new file mode 100644 index 000000000000..21db1698078f --- /dev/null +++ b/backend/core/quivr_core/api/modules/upload/service/upload_file.py @@ -0,0 +1,135 @@ +import json +import os +from multiprocessing import get_logger + +from langchain.pydantic_v1 import Field +from langchain.schema import Document +from supabase.client import Client + +from quivr_core.api.logger import get_logger +from quivr_core.api.models.settings import get_supabase_client + +logger = get_logger(__name__) + + +# Mapping of file extensions to MIME types +mime_types = { + ".txt": "text/plain", + ".csv": "text/csv", + ".md": "text/markdown", + ".markdown": "text/markdown", + ".telegram": "application/x-telegram", + ".m4a": "audio/mp4", + ".mp3": "audio/mpeg", + ".webm": "audio/webm", + ".mp4": "video/mp4", + ".mpga": "audio/mpeg", + ".wav": "audio/wav", + ".mpeg": "video/mpeg", + ".pdf": "application/pdf", + ".html": "text/html", + ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + ".docx": 
"application/vnd.openxmlformats-officedocument.wordprocessingml.document", + ".odt": "application/vnd.oasis.opendocument.text", + ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + ".xls": "application/vnd.ms-excel", + ".epub": "application/epub+zip", + ".ipynb": "application/x-ipynb+json", + ".py": "text/x-python", +} + + +def check_file_exists(brain_id: str, file_identifier: str) -> bool: + supabase_client: Client = get_supabase_client() + try: + # Check if the file exists + logger.info(f"Checking if file {file_identifier} exists.") + # This needs to be converted into a file_identifier that is safe for a URL + + response = supabase_client.storage.from_("quivr").list(brain_id) + + # Check if the file_identifier is in the response + file_exists = any( + file["name"].split(".")[0] == file_identifier.split(".")[0] + for file in response + ) + logger.info(f"File identifier: {file_identifier}") + logger.info(f"File exists: {file_exists}") + if file_exists: + logger.info(f"File {file_identifier} exists.") + return True + else: + logger.info(f"File {file_identifier} does not exist.") + return False + except Exception as e: + logger.error(f"An error occurred while checking the file: {e}") + return True + + +def upload_file_storage(file, file_identifier: str, upsert: str = "false"): + supabase_client: Client = get_supabase_client() + response = None + + try: + # Get the file extension + _, file_extension = os.path.splitext(file_identifier) + + # Get the MIME type for the file extension + mime_type = mime_types.get(file_extension, "text/html") + + response = supabase_client.storage.from_("quivr").upload( + file_identifier, + file, + file_options={ + "content-type": mime_type, + "upsert": upsert, + "cache-control": "3600", + }, + ) + + return response + except Exception as e: + if "The resource already exists" in str(e) and upsert == "true": + response = supabase_client.storage.from_("quivr").update( + file_identifier, + file, + file_options={ + "content-type": mime_type, + "upsert": upsert, + "cache-control": "3600", + }, + ) + else: + raise e + + +class DocumentSerializable(Document): + """Class for storing a piece of text and associated metadata.""" + + page_content: str + metadata: dict = Field(default_factory=dict) + + @property + def lc_serializable(self) -> bool: + return True + + def __repr__(self): + return f"Document(page_content='{self.page_content[:50]}...', metadata={self.metadata})" + + def __str__(self): + return self.__repr__() + + def to_json(self) -> str: + """Convert the Document object to a JSON string.""" + return json.dumps( + { + "page_content": self.page_content, + "metadata": self.metadata, + } + ) + + @classmethod + def from_json(cls, json_str: str): + """Create a Document object from a JSON string.""" + data = json.loads(json_str) + return cls(page_content=data["page_content"], metadata=data["metadata"]) diff --git a/backend/core/quivr_core/api/modules/upload/tests/test_files/test.bib b/backend/core/quivr_core/api/modules/upload/tests/test_files/test.bib new file mode 100644 index 000000000000..14b61838bdf3 --- /dev/null +++ b/backend/core/quivr_core/api/modules/upload/tests/test_files/test.bib @@ -0,0 +1,6 @@ +@Article{citekey, + author = "", + title = "", + journal = "", + year = "", +} diff --git a/backend/core/quivr_core/api/modules/upload/tests/test_files/test.csv b/backend/core/quivr_core/api/modules/upload/tests/test_files/test.csv new file mode 100644 index 000000000000..a025eb598153 --- /dev/null +++ 
b/backend/core/quivr_core/api/modules/upload/tests/test_files/test.csv
@@ -0,0 +1,17 @@
+quivrhq/quivr,Sat May 13 2023 02:20:09 GMT+0200 (heure d’été d’Europe centrale),0
+quivrhq/quivr,Tue May 16 2023 18:03:49 GMT+0200 (heure d’été d’Europe centrale),660
+quivrhq/quivr,Thu May 18 2023 03:04:23 GMT+0200 (heure d’été d’Europe centrale),1380
+quivrhq/quivr,Thu May 18 2023 23:04:11 GMT+0200 (heure d’été d’Europe centrale),2070
+quivrhq/quivr,Sat May 20 2023 04:44:40 GMT+0200 (heure d’été d’Europe centrale),2790
+quivrhq/quivr,Sun May 21 2023 03:19:46 GMT+0200 (heure d’été d’Europe centrale),3510
+quivrhq/quivr,Mon May 22 2023 08:03:18 GMT+0200 (heure d’été d’Europe centrale),4230
+quivrhq/quivr,Tue May 23 2023 16:57:58 GMT+0200 (heure d’été d’Europe centrale),4950
+quivrhq/quivr,Sat May 27 2023 02:18:31 GMT+0200 (heure d’été d’Europe centrale),5640
+quivrhq/quivr,Thu Jun 01 2023 18:45:27 GMT+0200 (heure d’été d’Europe centrale),6360
+quivrhq/quivr,Thu Jun 08 2023 16:33:57 GMT+0200 (heure d’été d’Europe centrale),7080
+quivrhq/quivr,Mon Jun 19 2023 12:58:34 GMT+0200 (heure d’été d’Europe centrale),7800
+quivrhq/quivr,Tue Jun 27 2023 14:45:52 GMT+0200 (heure d’été d’Europe centrale),8520
+quivrhq/quivr,Fri Jun 30 2023 11:43:51 GMT+0200 (heure d’été d’Europe centrale),9210
+quivrhq/quivr,Fri Jul 07 2023 23:08:23 GMT+0200 (heure d’été d’Europe centrale),9930
+quivrhq/quivr,Mon Jul 10 2023 08:13:07 GMT+0200 (heure d’été d’Europe centrale),10650
+quivrhq/quivr,Wed Jul 12 2023 09:40:29 GMT+0200 (heure d’été d’Europe centrale),13837
diff --git a/backend/core/quivr_core/api/modules/upload/tests/test_files/test.pdf b/backend/core/quivr_core/api/modules/upload/tests/test_files/test.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..e256f71d70b53665f69f5b84b9bb0cbefa35cfcc
GIT binary patch
literal 10683
[10683 bytes of base85-encoded binary payload omitted; it is not human-readable. The patch text resumes below, mid-hunk, inside the diff that adds the user controller routes; that diff's header and opening lines were lost in the binary data.]
) -> UserIdentity:
+    """
+    Update user identity.
+    """
+    return user_repository.update_user_properties(
+        current_user.id, user_identity_updatable_properties
+    )
+
+
+@user_router.get(
+    "/user/identity",
+    dependencies=[Depends(AuthBearer())],
+    tags=["User"],
+)
+def get_user_identity_route(
+    current_user: UserIdentity = Depends(get_current_user),
+) -> UserIdentity:
+    """
+    Get user identity.
+    """
+    return user_repository.get_user_identity(current_user.id)
+
+
+@user_router.delete(
+    "/user_data",
+    dependencies=[Depends(AuthBearer())],
+    tags=["User"],
+)
+async def delete_user_data_route(
+    current_user: UserIdentity = Depends(get_current_user),
+):
+    """
+    Delete a user.
+
+    - `user_id`: The ID of the user to delete.
+
+    This endpoint deletes a user from the system.
+    """
+
+    user_repository.delete_user_data(current_user.id)
+
+    return {"message": "User deleted successfully"}
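A sketch (not part of the patch) of exercising these routes without real authentication: FastAPI's `dependency_overrides` can replace `get_current_user`, while `AuthBearer` would still need a token header or an override of its own. The `app` wiring here is assumed:

```python
from uuid import uuid4

from fastapi import FastAPI
from fastapi.testclient import TestClient

app = FastAPI()
app.include_router(user_router)


def fake_current_user() -> UserIdentity:
    # Stub identity; every route then acts on behalf of this user.
    return UserIdentity(id=uuid4(), email="admin@quivr.app")


app.dependency_overrides[get_current_user] = fake_current_user

client = TestClient(app)
resp = client.get("/user/identity", headers={"Authorization": "Bearer <token>"})
```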
+ """ + return user_repository.get_user_credits(current_user.id) diff --git a/backend/core/quivr_core/api/modules/user/dto/inputs.py b/backend/core/quivr_core/api/modules/user/dto/inputs.py new file mode 100644 index 000000000000..348e99af68b8 --- /dev/null +++ b/backend/core/quivr_core/api/modules/user/dto/inputs.py @@ -0,0 +1,12 @@ +from typing import Optional + +from pydantic import BaseModel + + +class UserUpdatableProperties(BaseModel): + # Nothing for now + username: Optional[str] = None + company: Optional[str] = None + onboarded: Optional[bool] = None + company_size: Optional[str] = None + usage_purpose: Optional[str] = None diff --git a/backend/core/quivr_core/api/modules/user/entity/user_identity.py b/backend/core/quivr_core/api/modules/user/entity/user_identity.py new file mode 100644 index 000000000000..8feaccfe0c2e --- /dev/null +++ b/backend/core/quivr_core/api/modules/user/entity/user_identity.py @@ -0,0 +1,28 @@ +from typing import List, Optional +from uuid import UUID, uuid4 + +from pydantic import BaseModel +from sqlmodel import Field, Relationship, SQLModel + + +class User(SQLModel, table=True): + __tablename__ = "users" # type: ignore + + id: UUID | None = Field( + primary_key=True, + nullable=False, + default_factory=uuid4, + ) + email: str + onboarded: bool | None = None + chats: List["Chat"] | None = Relationship(back_populates="user") # type: ignore + + +class UserIdentity(BaseModel): + id: UUID + email: Optional[str] = None + username: Optional[str] = None + company: Optional[str] = None + onboarded: Optional[bool] = None + company_size: Optional[str] = None + usage_purpose: Optional[str] = None diff --git a/backend/core/quivr_core/api/modules/user/repository/__init__.py b/backend/core/quivr_core/api/modules/user/repository/__init__.py new file mode 100644 index 000000000000..dba60627d7a8 --- /dev/null +++ b/backend/core/quivr_core/api/modules/user/repository/__init__.py @@ -0,0 +1 @@ +from .users import Users diff --git a/backend/core/quivr_core/api/modules/user/repository/users.py b/backend/core/quivr_core/api/modules/user/repository/users.py new file mode 100644 index 000000000000..dbc40a411984 --- /dev/null +++ b/backend/core/quivr_core/api/modules/user/repository/users.py @@ -0,0 +1,127 @@ +import time + +from quivr_core.api.models.settings import get_supabase_client +from quivr_core.api.modules.user.entity.user_identity import UserIdentity +from quivr_core.api.modules.user.repository.users_interface import UsersInterface +from quivr_core.api.modules.user.service import user_usage + + +class Users(UsersInterface): + def __init__(self): + supabase_client = get_supabase_client() + self.db = supabase_client + + def create_user_identity(self, id): + response = ( + self.db.from_("user_identity") + .insert( + { + "user_id": str(id), + } + ) + .execute() + ) + user_identity = response.data[0] + return UserIdentity(id=user_identity.get("user_id")) + + def update_user_properties( + self, + user_id, + user_identity_updatable_properties, + ): + response = ( + self.db.from_("user_identity") + .update(user_identity_updatable_properties.__dict__) + .filter("user_id", "eq", user_id) # type: ignore + .execute() + ) + + if len(response.data) == 0: + return self.create_user_identity(user_id) + + user_identity = response.data[0] + + print("USER_IDENTITY", user_identity) + return UserIdentity(id=user_id) + + def get_user_identity(self, user_id): + response = ( + self.db.from_("user_identity") + .select("*, users (email)") + .filter("user_id", "eq", str(user_id)) + .execute() + ) 
+
+        if len(response.data) == 0:
+            return self.create_user_identity(user_id)
+
+        user_identity = response.data[0]
+
+        user_identity["id"] = user_id  # Add 'id' field to the dictionary
+        user_identity["email"] = user_identity["users"]["email"]
+        return UserIdentity(**user_identity)
+
+    def get_user_id_by_user_email(self, email):
+        response = (
+            self.db.rpc("get_user_id_by_user_email", {"user_email": email})
+            .execute()
+            .data
+        )
+        if len(response) > 0:
+            return response[0]["user_id"]
+        return None
+
+    def get_user_email_by_user_id(self, user_id):
+        response = self.db.rpc(
+            "get_user_email_by_user_id", {"user_id": str(user_id)}
+        ).execute()
+        return response.data[0]["email"]
+
+    def delete_user_data(self, user_id):
+        response = (
+            self.db.from_("brains_users")
+            .select("brain_id")
+            .filter("rights", "eq", "Owner")
+            .filter("user_id", "eq", str(user_id))
+            .execute()
+        )
+        brain_ids = [row["brain_id"] for row in response.data]
+
+        for brain_id in brain_ids:
+            self.db.table("brains").delete().filter(
+                "brain_id", "eq", brain_id
+            ).execute()
+
+        for brain_id in brain_ids:
+            self.db.table("brains_vectors").delete().filter(
+                "brain_id", "eq", brain_id
+            ).execute()
+
+        for brain_id in brain_ids:
+            self.db.table("chat_history").delete().filter(
+                "brain_id", "eq", brain_id
+            ).execute()
+
+        self.db.table("user_settings").delete().filter(
+            "user_id", "eq", str(user_id)
+        ).execute()
+        self.db.table("user_identity").delete().filter(
+            "user_id", "eq", str(user_id)
+        ).execute()
+        self.db.table("users").delete().filter("id", "eq", str(user_id)).execute()
+
+    def get_user_credits(self, user_id):
+        user_usage_instance = user_usage.UserUsage(id=user_id)
+
+        user_monthly_usage = user_usage_instance.get_user_monthly_usage(
+            time.strftime("%Y%m%d")
+        )
+        monthly_chat_credit = (
+            self.db.from_("user_settings")
+            .select("monthly_chat_credit")
+            .filter("user_id", "eq", str(user_id))
+            .execute()
+            .data[0]["monthly_chat_credit"]
+        )
+
+        return monthly_chat_credit - user_monthly_usage
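The credit computation in `get_user_credits` above is a single subtraction; a worked example with made-up numbers (the real values come from the `user_settings` row and the monthly request counter keyed on `time.strftime("%Y%m%d")`):

```python
monthly_chat_credit = 100  # user_settings.monthly_chat_credit (assumed value)
user_monthly_usage = 37    # requests counted so far this month (assumed value)

remaining = monthly_chat_credit - user_monthly_usage
assert remaining == 63     # what get_user_credits would return
```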
+ """ + + @abstractmethod + def get_user_credits(self, user_id: UUID) -> int: + """ + Get user remaining credits + """ + pass diff --git a/backend/core/quivr_core/api/modules/user/service/__init__.py b/backend/core/quivr_core/api/modules/user/service/__init__.py new file mode 100644 index 000000000000..254962a4866f --- /dev/null +++ b/backend/core/quivr_core/api/modules/user/service/__init__.py @@ -0,0 +1 @@ +from .user_service import UserService diff --git a/backend/core/quivr_core/api/modules/user/service/user_service.py b/backend/core/quivr_core/api/modules/user/service/user_service.py new file mode 100644 index 000000000000..a4a6a66c827a --- /dev/null +++ b/backend/core/quivr_core/api/modules/user/service/user_service.py @@ -0,0 +1,17 @@ +from uuid import UUID + +from quivr_core.api.modules.user.repository.users import Users +from quivr_core.api.modules.user.repository.users_interface import UsersInterface + + +class UserService: + repository: UsersInterface + + def __init__(self): + self.repository = Users() + + def get_user_id_by_email(self, email: str) -> UUID | None: + return self.repository.get_user_id_by_user_email(email) + + def get_user_email_by_user_id(self, user_id: UUID) -> str | None: + return self.repository.get_user_email_by_user_id(user_id) diff --git a/backend/core/quivr_core/api/modules/user/service/user_usage.py b/backend/core/quivr_core/api/modules/user/service/user_usage.py new file mode 100644 index 000000000000..e5543f18c041 --- /dev/null +++ b/backend/core/quivr_core/api/modules/user/service/user_usage.py @@ -0,0 +1,99 @@ +from quivr_core.api.logger import get_logger +from quivr_core.api.models.databases.supabase.supabase import SupabaseDB +from quivr_core.api.models.settings import PostHogSettings, get_supabase_db +from quivr_core.api.modules.user.entity.user_identity import UserIdentity + +logger = get_logger(__name__) + + +class UserUsage(UserIdentity): + daily_requests_count: int = 0 + + def __init__(self, **data): + super().__init__(**data) + + @property + def supabase_db(self) -> SupabaseDB: + return get_supabase_db() + + def get_user_daily_usage(self): + """ + Fetch the user request stats from the database + """ + request = self.supabase_db.get_user_usage(self.id) + return request + + def get_models(self): + """ + Fetch the user request stats from the database + """ + request = self.supabase_db.get_models() + + return request + + def get_user_settings(self): + """ + Fetch the user settings from the database + """ + posthog = PostHogSettings() + request = self.supabase_db.get_user_settings(self.id) + if request is not None and request.get("is_premium", False): + posthog.set_once_user_properties( + self.id, "HAS_OR_HAD_PREMIUM", {"is_was_premium": "true"} + ) + posthog.set_user_properties( + self.id, "CURRENT_PREMIUM", {"is_premium": "true"} + ) + else: + posthog.set_user_properties( + self.id, "CURRENT_PREMIUM", {"is_premium": "false"} + ) + + return request + + def get_user_monthly_usage(self, date): + """ + Fetch the user monthly usage from the database + """ + posthog = PostHogSettings() + request = self.supabase_db.get_user_requests_count_for_month(self.id, date) + posthog.set_user_properties( + self.id, "MONTHLY_USAGE", {"monthly_chat_usage": request} + ) + + return request + + def handle_increment_user_request_count(self, date, number=1): + """ + Increment the user request count in the database + """ + current_requests_count = self.supabase_db.get_user_requests_count_for_month( + self.id, date + ) + + daily_requests_count = 
self.supabase_db.get_user_requests_count_for_day( + self.id, date + ) + + # BUG: could be a bug, we are assuming that 0 means no records ! + if daily_requests_count == 0: + logger.info("Request count is 0, creating new record") + if self.email is None: + raise ValueError("User Email should be defined for daily usage table") + self.supabase_db.create_user_daily_usage( + user_id=self.id, date=date, user_email=self.email, number=number + ) + self.daily_requests_count = number + return + + self.supabase_db.increment_user_request_count( + user_id=self.id, + date=date, + number=daily_requests_count + number, + ) + + self.daily_requests_count = current_requests_count + number + + logger.info( + f"User {self.email} request count updated to {self.daily_requests_count}" + ) diff --git a/backend/core/quivr_core/api/modules/user/tests/test_user_controller.py b/backend/core/quivr_core/api/modules/user/tests/test_user_controller.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/packages/__init__.py b/backend/core/quivr_core/api/packages/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/packages/emails/__init__.py b/backend/core/quivr_core/api/packages/emails/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/packages/emails/send_email.py b/backend/core/quivr_core/api/packages/emails/send_email.py new file mode 100644 index 000000000000..2d2b7639aae0 --- /dev/null +++ b/backend/core/quivr_core/api/packages/emails/send_email.py @@ -0,0 +1,11 @@ +from typing import Dict + +import resend + +from quivr_core.models.settings import ResendSettings + + +def send_email(params: Dict): + settings = ResendSettings() + resend.api_key = settings.resend_api_key + return resend.Emails.send(params) diff --git a/backend/core/quivr_core/api/packages/embeddings/__init__.py b/backend/core/quivr_core/api/packages/embeddings/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/packages/embeddings/vectors.py b/backend/core/quivr_core/api/packages/embeddings/vectors.py new file mode 100644 index 000000000000..807b174884cc --- /dev/null +++ b/backend/core/quivr_core/api/packages/embeddings/vectors.py @@ -0,0 +1,73 @@ +from concurrent.futures import ThreadPoolExecutor +from typing import List +from uuid import UUID + +from pydantic import BaseModel + +from quivr_core.logger import get_logger +from quivr_core.models.settings import ( + get_documents_vector_store, + get_embedding_client, + get_supabase_db, +) + +logger = get_logger(__name__) + + +# TODO: Create interface for embeddings and implement it for Supabase and OpenAI (current Quivr) +class Neurons(BaseModel): + def create_vector(self, docs): + documents_vector_store = get_documents_vector_store() + + try: + sids = documents_vector_store.add_documents(docs) + if sids and len(sids) > 0: + return sids + + except Exception as e: + logger.error(f"Error creating vector for document {e}") + + def create_embedding(self, content): + embeddings = get_embedding_client() + return embeddings.embed_query(content) + + +def error_callback(exception): + print("An exception occurred:", exception) + + +def process_batch(batch_ids: List[str]): + supabase_db = get_supabase_db() + + try: + if len(batch_ids) == 1: + return (supabase_db.get_vectors_by_batch(UUID(batch_ids[0]))).data + else: + return (supabase_db.get_vectors_in_batch(batch_ids)).data + except Exception as e: + 
logger.error("Error retrieving batched vectors", e) + + +# TODO: move to Knowledge class +def get_unique_files_from_vector_ids(vectors_ids): + # Move into Vectors class + """ + Retrieve unique user data vectors. + """ + + # constants + BATCH_SIZE = 5 + + with ThreadPoolExecutor() as executor: + futures = [] + for i in range(0, len(vectors_ids), BATCH_SIZE): + batch_ids = vectors_ids[i : i + BATCH_SIZE] + future = executor.submit(process_batch, batch_ids) + futures.append(future) + + # Retrieve the results + vectors_responses = [future.result() for future in futures] + + documents = [item for sublist in vectors_responses for item in sublist] + unique_files = [dict(t) for t in set(tuple(d.items()) for d in documents)] + return unique_files diff --git a/backend/core/quivr_core/api/packages/files/__init__.py b/backend/core/quivr_core/api/packages/files/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/packages/files/crawl/__init__.py b/backend/core/quivr_core/api/packages/files/crawl/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/packages/files/crawl/crawler.py b/backend/core/quivr_core/api/packages/files/crawl/crawler.py new file mode 100644 index 000000000000..b3a9bbd190fe --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/crawl/crawler.py @@ -0,0 +1,44 @@ +import os +import re +import unicodedata + +from langchain_community.document_loaders import PlaywrightURLLoader +from pydantic import BaseModel + +from quivr_core.logger import get_logger + +logger = get_logger(__name__) + + +class CrawlWebsite(BaseModel): + url: str + js: bool = False + depth: int = int(os.getenv("CRAWL_DEPTH", "1")) + max_pages: int = 100 + max_time: int = 60 + + def process(self) -> str: + # Extract and combine content recursively + loader = PlaywrightURLLoader( + urls=[self.url], remove_selectors=["header", "footer"] + ) + + data = loader.load() + # Now turn the data into a string + logger.info(f"Extracted content from {len(data)} pages") + logger.debug(f"Extracted data : {data}") + extracted_content = "" + for page in data: + extracted_content += page.page_content + + return extracted_content + + def checkGithub(self): + return "github.com" in self.url + + +def slugify(text): + text = unicodedata.normalize("NFKD", text).encode("ascii", "ignore").decode("utf-8") + text = re.sub(r"[^\w\s-]", "", text).strip().lower() + text = re.sub(r"[-\s]+", "-", text) + return text diff --git a/backend/core/quivr_core/api/packages/files/file.py b/backend/core/quivr_core/api/packages/files/file.py new file mode 100644 index 000000000000..053ce2c76d78 --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/file.py @@ -0,0 +1,45 @@ +import hashlib +from io import BytesIO + +from fastapi import UploadFile + + +def convert_bytes(bytes, precision=2): + """Converts bytes into a human-friendly format.""" + abbreviations = ["B", "KB", "MB"] + if bytes <= 0: + return "0 B" + size = bytes + index = 0 + while size >= 1024 and index < len(abbreviations) - 1: + size /= 1024 + index += 1 + return f"{size:.{precision}f} {abbreviations[index]}" + + +def get_file_size(file: UploadFile): + if isinstance(file.file, BytesIO): + # If the file object is a BytesIO object, get the size of the bytes data + file_size = len(file.file.getvalue()) + return file_size + # move the cursor to the end of the file + file.file._file.seek(0, 2) # pyright: ignore reportPrivateUsage=none + file_size = ( + file.file._file.tell() # 
pyright: ignore reportPrivateUsage=none + ) # Getting the size of the file + # move the cursor back to the beginning of the file + file.file.seek(0) + + return file_size + + +def compute_sha1_from_file(file_path): + with open(file_path, "rb") as file: + bytes = file.read() + readable_hash = compute_sha1_from_content(bytes) + return readable_hash + + +def compute_sha1_from_content(content): + readable_hash = hashlib.sha1(content).hexdigest() + return readable_hash diff --git a/backend/core/quivr_core/api/packages/files/loaders/__init__.py b/backend/core/quivr_core/api/packages/files/loaders/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/packages/files/loaders/telegram.py b/backend/core/quivr_core/api/packages/files/loaders/telegram.py new file mode 100644 index 000000000000..e114e46751fe --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/loaders/telegram.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +import json +from pathlib import Path +from typing import List + +from langchain.docstore.document import Document +from langchain_community.document_loaders.base import BaseLoader + + +def concatenate_rows(row: dict) -> str: + """Combine message information in a readable format ready to be used.""" + date = row["date"] + sender = row.get( + "from", "Unknown" + ) # Using .get() to handle cases where 'from' might not be present + + text_content = row.get("text", "") + + # Function to process a single text entity + def process_text_entity(entity): + if isinstance(entity, str): + return entity + elif isinstance(entity, dict) and "text" in entity: + return entity["text"] + return "" + + # Process the text content based on its type + if isinstance(text_content, str): + text = text_content + elif isinstance(text_content, list): + text = "".join(process_text_entity(item) for item in text_content) + else: + text = "" + + # Skip messages with empty text + if not text.strip(): + return "" + + return f"{sender} on {date}: {text}\n\n" + + +class TelegramChatFileLoader(BaseLoader): + """Load from `Telegram chat` dump.""" + + def __init__(self, path: str): + """Initialize with a path.""" + self.file_path = path + + def load(self) -> List[Document]: + """Load documents.""" + p = Path(self.file_path) + + with open(p, encoding="utf8") as f: + d = json.load(f) + + text = "".join( + concatenate_rows(message) + for message in d["messages"] + if message["type"] == "message" + and (isinstance(message["text"], str) or isinstance(message["text"], list)) + ) + metadata = {"source": str(p)} + + return [Document(page_content=text, metadata=metadata)] diff --git a/backend/core/quivr_core/api/packages/files/parsers/__init__.py b/backend/core/quivr_core/api/packages/files/parsers/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/packages/files/parsers/audio.py b/backend/core/quivr_core/api/packages/files/parsers/audio.py new file mode 100644 index 000000000000..8c36d17102ab --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/parsers/audio.py @@ -0,0 +1,52 @@ +import time + +import openai +from langchain.schema import Document +from langchain.text_splitter import RecursiveCharacterTextSplitter + +from quivr_core.models.files import File +from quivr_core.models.settings import get_documents_vector_store +from quivr_core.packages.files.file import compute_sha1_from_content + + +def process_audio(file: File, **kwargs): + dateshort = time.strftime("%Y%m%d-%H%M%S") + 
file_meta_name = f"audiotranscript_{dateshort}.txt" + documents_vector_store = get_documents_vector_store() + + with open(file.tmp_file_path, "rb") as audio_file: + transcript = openai.Audio.transcribe("whisper-1", audio_file) + + file_sha = compute_sha1_from_content( + transcript.text.encode("utf-8") # pyright: ignore reportPrivateUsage=none + ) + file_size = len( + transcript.text.encode("utf-8") # pyright: ignore reportPrivateUsage=none + ) + + chunk_size = 500 + chunk_overlap = 0 + + text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder( + chunk_size=chunk_size, chunk_overlap=chunk_overlap + ) + texts = text_splitter.split_text( + transcript.text.encode("utf-8") # pyright: ignore reportPrivateUsage=none + ) + + docs_with_metadata = [ + Document( + page_content=text, + metadata={ + "file_sha1": file_sha, + "file_size": file_size, + "file_name": file_meta_name, + "chunk_size": chunk_size, + "chunk_overlap": chunk_overlap, + "date": dateshort, + }, + ) + for text in texts + ] + + documents_vector_store.add_documents(docs_with_metadata) diff --git a/backend/core/quivr_core/api/packages/files/parsers/bibtex.py b/backend/core/quivr_core/api/packages/files/parsers/bibtex.py new file mode 100644 index 000000000000..fde358593d8c --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/parsers/bibtex.py @@ -0,0 +1,18 @@ +from langchain_community.document_loaders import BibtexLoader + +from quivr_core.models.files import File + +from .common import process_file + + +def process_bibtex( + file: File, brain_id, original_file_name, integration=None, integration_link=None +): + return process_file( + file=file, + loader_class=BibtexLoader, + brain_id=brain_id, + original_file_name=original_file_name, + integration=integration, + integration_link=integration_link, + ) diff --git a/backend/core/quivr_core/api/packages/files/parsers/code_python.py b/backend/core/quivr_core/api/packages/files/parsers/code_python.py new file mode 100644 index 000000000000..2222be417563 --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/parsers/code_python.py @@ -0,0 +1,18 @@ +from langchain_community.document_loaders import PythonLoader + +from quivr_core.models.files import File + +from .common import process_file + + +def process_python( + file: File, brain_id, original_file_name, integration=None, integration_link=None +): + return process_file( + file=file, + loader_class=PythonLoader, + brain_id=brain_id, + original_file_name=original_file_name, + integration=integration, + integration_link=integration_link, + ) diff --git a/backend/core/quivr_core/api/packages/files/parsers/common.py b/backend/core/quivr_core/api/packages/files/parsers/common.py new file mode 100644 index 000000000000..6d6c8a1c6536 --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/parsers/common.py @@ -0,0 +1,118 @@ +import asyncio +import os +import tempfile +import time + +import nest_asyncio +import tiktoken +import uvloop +from langchain.schema import Document +from langchain.text_splitter import RecursiveCharacterTextSplitter +from llama_parse import LlamaParse + +from quivr_core.logger import get_logger +from quivr_core.models.files import File +from quivr_core.modules.brain.service.brain_vector_service import BrainVectorService +from quivr_core.modules.upload.service.upload_file import DocumentSerializable +from quivr_core.packages.embeddings.vectors import Neurons + +if not isinstance(asyncio.get_event_loop(), uvloop.Loop): + nest_asyncio.apply() + +logger = get_logger(__name__) + + +def 
process_file( + file: File, + loader_class, + brain_id, + original_file_name, + integration=None, + integration_link=None, +): + dateshort = time.strftime("%Y%m%d") + neurons = Neurons() + + if os.getenv("LLAMA_CLOUD_API_KEY"): + doc = file.file + document_ext = os.path.splitext(doc.filename)[1] + if document_ext in [".pdf", ".docx", ".doc"]: + document_tmp = tempfile.NamedTemporaryFile( + suffix=document_ext, delete=False + ) + # Seek to the beginning of the file + doc.file.seek(0) + document_tmp.write(doc.file.read()) + + parser = LlamaParse( + result_type="markdown", # "markdown" and "text" are available + parsing_instruction="Extract the tables and transform checkboxes into text. Transform tables to key = value. You can duplicates Keys if needed. For example: Productions Fonts = 300 productions Fonts Company Desktop License = Yes for Maximum of 60 Licensed Desktop users For example checkboxes should be: Premium Activated = Yes License Premier = No If a checkbox is present for a table with multiple options. Say Yes for the one activated and no for the one not activated. Format using headers.", + gpt4o_mode=True, + gpt4o_api_key=os.getenv("OPENAI_API_KEY"), + ) + + document_llama_parsed = parser.load_data(document_tmp.name) + document_tmp.close() + document_to_langchain = document_llama_parsed[0].to_langchain_format() + text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder( + chunk_size=file.chunk_size, chunk_overlap=file.chunk_overlap + ) + document_to_langchain = Document( + page_content=document_to_langchain.page_content + ) + file.documents = text_splitter.split_documents([document_to_langchain]) + else: + file.compute_documents(loader_class) + + metadata = { + "file_sha1": file.file_sha1, + "file_size": file.file_size, + "file_name": file.file_name, + "chunk_size": file.chunk_size, + "chunk_overlap": file.chunk_overlap, + "date": dateshort, + "original_file_name": original_file_name or file.file_name, + "integration": integration or "", + "integration_link": integration_link or "", + } + docs = [] + + enc = tiktoken.get_encoding("cl100k_base") + + if file.documents is not None: + logger.info("Coming here?") + for index, doc in enumerate(file.documents, start=1): # pyright: ignore reportPrivateUsage=none + new_metadata = metadata.copy() + logger.info(f"Processing document {doc}") + # Add filename at beginning of page content + doc.page_content = f"Filename: {new_metadata['original_file_name']} Content: {doc.page_content}" + + doc.page_content = doc.page_content.replace("\u0000", "") + + len_chunk = len(enc.encode(doc.page_content)) + + # Ensure the text is in UTF-8 + doc.page_content = doc.page_content.encode("utf-8", "replace").decode( + "utf-8" + ) + + new_metadata["chunk_size"] = len_chunk + new_metadata["index"] = index + doc_with_metadata = DocumentSerializable( + page_content=doc.page_content, metadata=new_metadata + ) + docs.append(doc_with_metadata) + + created_vector = neurons.create_vector(docs) + + brain_vector_service = BrainVectorService(brain_id) + + if created_vector is not None: + for created_vector_id in created_vector: + result = brain_vector_service.create_brain_vector( + created_vector_id, metadata["file_sha1"] + ) + logger.debug(f"Brain vector created: {result}") + return len(created_vector) + else: + return 0 diff --git a/backend/core/quivr_core/api/packages/files/parsers/csv.py b/backend/core/quivr_core/api/packages/files/parsers/csv.py new file mode 100644 index 000000000000..c3d8336482cc --- /dev/null +++ 
b/backend/core/quivr_core/api/packages/files/parsers/csv.py @@ -0,0 +1,18 @@ +from langchain_community.document_loaders import CSVLoader + +from quivr_core.models.files import File + +from .common import process_file + + +def process_csv( + file: File, brain_id, original_file_name, integration=None, integration_link=None +): + return process_file( + file=file, + loader_class=CSVLoader, + brain_id=brain_id, + original_file_name=original_file_name, + integration=integration, + integration_link=integration_link, + ) diff --git a/backend/core/quivr_core/api/packages/files/parsers/docx.py b/backend/core/quivr_core/api/packages/files/parsers/docx.py new file mode 100644 index 000000000000..d9b0fb2afb82 --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/parsers/docx.py @@ -0,0 +1,18 @@ +from langchain_community.document_loaders import Docx2txtLoader + +from quivr_core.models.files import File + +from .common import process_file + + +def process_docx( + file: File, brain_id, original_file_name, integration=None, integration_link=None +): + return process_file( + file=file, + loader_class=Docx2txtLoader, + brain_id=brain_id, + original_file_name=original_file_name, + integration=integration, + integration_link=integration_link, + ) diff --git a/backend/core/quivr_core/api/packages/files/parsers/epub.py b/backend/core/quivr_core/api/packages/files/parsers/epub.py new file mode 100644 index 000000000000..5b3393e16187 --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/parsers/epub.py @@ -0,0 +1,18 @@ +from langchain_community.document_loaders.epub import UnstructuredEPubLoader + +from quivr_core.models.files import File + +from .common import process_file + + +def process_epub( + file: File, brain_id, original_file_name, integration=None, integration_link=None +): + return process_file( + file=file, + loader_class=UnstructuredEPubLoader, + brain_id=brain_id, + original_file_name=original_file_name, + integration=integration, + integration_link=integration_link, + ) diff --git a/backend/core/quivr_core/api/packages/files/parsers/github.py b/backend/core/quivr_core/api/packages/files/parsers/github.py new file mode 100644 index 000000000000..66e99d88aa79 --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/parsers/github.py @@ -0,0 +1,79 @@ +import os +import time + +from langchain.schema import Document +from langchain.text_splitter import RecursiveCharacterTextSplitter +from langchain_community.document_loaders import GitLoader + +from quivr_core.models.files import File +from quivr_core.packages.embeddings.vectors import Neurons +from quivr_core.packages.files.file import compute_sha1_from_content + + +def process_github( + repo, + brain_id, +): + random_dir_name = os.urandom(16).hex() + dateshort = time.strftime("%Y%m%d") + loader = GitLoader( + clone_url=repo, + repo_path="/tmp/" + random_dir_name, + ) + documents = loader.load() + os.system("rm -rf /tmp/" + random_dir_name) + + chunk_size = 500 + chunk_overlap = 0 + text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder( + chunk_size=chunk_size, chunk_overlap=chunk_overlap + ) + + documents = text_splitter.split_documents(documents) + + for doc in documents: + if doc.metadata["file_type"] in [ + ".pyc", + ".png", + ".svg", + ".env", + ".lock", + ".gitignore", + ".gitmodules", + ".gitattributes", + ".gitkeep", + ".git", + ".json", + ]: + continue + metadata = { + "file_sha1": compute_sha1_from_content(doc.page_content.encode("utf-8")), + "file_size": len(doc.page_content) * 8, + "file_name": 
doc.metadata["file_name"], + "chunk_size": chunk_size, + "chunk_overlap": chunk_overlap, + "date": dateshort, + "original_file_name": doc.metadata["original_file_name"], + } + doc_with_metadata = Document(page_content=doc.page_content, metadata=metadata) + + print(doc_with_metadata.metadata["file_name"]) + + file = File( + file_sha1=compute_sha1_from_content(doc.page_content.encode("utf-8")) + ) + + file_exists = file.file_already_exists() + + if not file_exists: + neurons = Neurons() + created_vector = neurons.create_vector(doc_with_metadata) + + file_exists_in_brain = file.file_already_exists_in_brain(brain_id) + + if not file_exists_in_brain: + file.link_file_to_brain(brain_id) + return { + "message": f"✅ Github with {len(documents)} files has been uploaded.", + "type": "success", + } diff --git a/backend/core/quivr_core/api/packages/files/parsers/html.py b/backend/core/quivr_core/api/packages/files/parsers/html.py new file mode 100644 index 000000000000..98ce97095713 --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/parsers/html.py @@ -0,0 +1,18 @@ +from langchain_community.document_loaders import UnstructuredHTMLLoader + +from quivr_core.models.files import File + +from .common import process_file + + +def process_html( + file: File, brain_id, original_file_name, integration=None, integration_link=None +): + return process_file( + file=file, + loader_class=UnstructuredHTMLLoader, + brain_id=brain_id, + original_file_name=original_file_name, + integration=integration, + integration_link=integration_link, + ) diff --git a/backend/core/quivr_core/api/packages/files/parsers/markdown.py b/backend/core/quivr_core/api/packages/files/parsers/markdown.py new file mode 100644 index 000000000000..d5b016489f7d --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/parsers/markdown.py @@ -0,0 +1,18 @@ +from langchain_community.document_loaders import UnstructuredMarkdownLoader + +from quivr_core.models.files import File + +from .common import process_file + + +def process_markdown( + file: File, brain_id, original_file_name, integration=None, integration_link=None +): + return process_file( + file=file, + loader_class=UnstructuredMarkdownLoader, + brain_id=brain_id, + original_file_name=original_file_name, + integration=integration, + integration_link=integration_link, + ) diff --git a/backend/core/quivr_core/api/packages/files/parsers/notebook.py b/backend/core/quivr_core/api/packages/files/parsers/notebook.py new file mode 100644 index 000000000000..86b283d714f2 --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/parsers/notebook.py @@ -0,0 +1,18 @@ +from langchain_community.document_loaders import NotebookLoader + +from quivr_core.models.files import File + +from .common import process_file + + +def process_ipnyb( + file: File, brain_id, original_file_name, integration=None, integration_link=None +): + return process_file( + file=file, + loader_class=NotebookLoader, + brain_id=brain_id, + original_file_name=original_file_name, + integration=integration, + integration_link=integration_link, + ) diff --git a/backend/core/quivr_core/api/packages/files/parsers/odt.py b/backend/core/quivr_core/api/packages/files/parsers/odt.py new file mode 100644 index 000000000000..fdb8a4de61dd --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/parsers/odt.py @@ -0,0 +1,18 @@ +from langchain_community.document_loaders import UnstructuredPDFLoader + +from quivr_core.models.files import File + +from .common import process_file + + +def process_odt( + file: File, brain_id, 
original_file_name, integration=None, integration_link=None +): + return process_file( + file=file, + loader_class=UnstructuredPDFLoader, + brain_id=brain_id, + original_file_name=original_file_name, + integration=integration, + integration_link=integration_link, + ) diff --git a/backend/core/quivr_core/api/packages/files/parsers/pdf.py b/backend/core/quivr_core/api/packages/files/parsers/pdf.py new file mode 100644 index 000000000000..21418a98d6ae --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/parsers/pdf.py @@ -0,0 +1,22 @@ +from langchain_community.document_loaders import UnstructuredPDFLoader + +from quivr_core.models.files import File + +from .common import process_file + + +def process_pdf( + file: File, + brain_id, + original_file_name, + integration=None, + integration_link=None, +): + return process_file( + file=file, + loader_class=UnstructuredPDFLoader, + brain_id=brain_id, + original_file_name=original_file_name, + integration=integration, + integration_link=integration_link, + ) diff --git a/backend/core/quivr_core/api/packages/files/parsers/powerpoint.py b/backend/core/quivr_core/api/packages/files/parsers/powerpoint.py new file mode 100644 index 000000000000..1b1363326874 --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/parsers/powerpoint.py @@ -0,0 +1,18 @@ +from langchain_community.document_loaders import UnstructuredFileLoader + +from quivr_core.models.files import File + +from .common import process_file + + +def process_powerpoint( + file: File, brain_id, original_file_name, integration=None, integration_link=None +): + return process_file( + file=file, + loader_class=UnstructuredFileLoader, + brain_id=brain_id, + original_file_name=original_file_name, + integration=integration, + integration_link=integration_link, + ) diff --git a/backend/core/quivr_core/api/packages/files/parsers/telegram.py b/backend/core/quivr_core/api/packages/files/parsers/telegram.py new file mode 100644 index 000000000000..477c0a10bb21 --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/parsers/telegram.py @@ -0,0 +1,17 @@ +from quivr_core.models.files import File +from quivr_core.packages.files.loaders.telegram import TelegramChatFileLoader + +from .common import process_file + + +def process_telegram( + file: File, brain_id, original_file_name, integration=None, integration_link=None +): + return process_file( + file=file, + loader_class=TelegramChatFileLoader, + brain_id=brain_id, + original_file_name=original_file_name, + integration=integration, + integration_link=integration_link, + ) diff --git a/backend/core/quivr_core/api/packages/files/parsers/txt.py b/backend/core/quivr_core/api/packages/files/parsers/txt.py new file mode 100644 index 000000000000..172aeea4ec64 --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/parsers/txt.py @@ -0,0 +1,18 @@ +from langchain_community.document_loaders import TextLoader + +from quivr_core.models.files import File + +from .common import process_file + + +def process_txt( + file: File, brain_id, original_file_name, integration=None, integration_link=None +): + return process_file( + file=file, + loader_class=TextLoader, + brain_id=brain_id, + original_file_name=original_file_name, + integration=integration, + integration_link=integration_link, + ) diff --git a/backend/core/quivr_core/api/packages/files/parsers/xlsx.py b/backend/core/quivr_core/api/packages/files/parsers/xlsx.py new file mode 100644 index 000000000000..c0069ced31eb --- /dev/null +++ 
b/backend/core/quivr_core/api/packages/files/parsers/xlsx.py @@ -0,0 +1,18 @@ +from langchain_community.document_loaders import UnstructuredExcelLoader + +from quivr_core.models.files import File + +from .common import process_file + + +def process_xlsx( + file: File, brain_id, original_file_name, integration=None, integration_link=None +): + return process_file( + file=file, + loader_class=UnstructuredExcelLoader, + brain_id=brain_id, + original_file_name=original_file_name, + integration=integration, + integration_link=integration_link, + ) diff --git a/backend/core/quivr_core/api/packages/files/processors.py b/backend/core/quivr_core/api/packages/files/processors.py new file mode 100644 index 000000000000..d4922317c51e --- /dev/null +++ b/backend/core/quivr_core/api/packages/files/processors.py @@ -0,0 +1,108 @@ +from quivr_core.modules.brain.service.brain_service import BrainService + +from .parsers.audio import process_audio +from .parsers.bibtex import process_bibtex +from .parsers.code_python import process_python +from .parsers.csv import process_csv +from .parsers.docx import process_docx +from .parsers.epub import process_epub +from .parsers.html import process_html +from .parsers.markdown import process_markdown +from .parsers.notebook import process_ipnyb +from .parsers.odt import process_odt +from .parsers.pdf import process_pdf +from .parsers.powerpoint import process_powerpoint +from .parsers.telegram import process_telegram +from .parsers.txt import process_txt +from .parsers.xlsx import process_xlsx + +file_processors = { + ".txt": process_txt, + ".csv": process_csv, + ".md": process_markdown, + ".markdown": process_markdown, + ".telegram": process_telegram, + ".m4a": process_audio, + ".mp3": process_audio, + ".webm": process_audio, + ".mp4": process_audio, + ".mpga": process_audio, + ".wav": process_audio, + ".mpeg": process_audio, + ".pdf": process_pdf, + ".html": process_html, + ".bib": process_bibtex, + ".pptx": process_powerpoint, + ".docx": process_docx, + ".odt": process_odt, + ".xlsx": process_xlsx, + ".xls": process_xlsx, + ".epub": process_epub, + ".ipynb": process_ipnyb, + ".py": process_python, +} + + +def create_response(message, type): + return {"message": message, "type": type} + + +brain_service = BrainService() + + +# TODO: Move filter_file to a file service to avoid circular imports from quivr_core.models/files.py for File class +def filter_file( + file, + brain_id, + original_file_name=None, +): + file_exists = file.file_already_exists() + file_exists_in_brain = file.file_already_exists_in_brain(brain_id) + using_file_name = file.file_name + + brain = brain_service.get_brain_by_id(brain_id) + if brain is None: + raise Exception("It seems like you're uploading knowledge to an unknown brain.") + + if file_exists_in_brain: + return create_response( + f"🤔 {using_file_name} already exists in brain {brain.name}.", # pyright: ignore reportPrivateUsage=none + "warning", + ) + elif file.file_is_empty(): + return create_response( + f"❌ {original_file_name} is empty.", # pyright: ignore reportPrivateUsage=none + "error", # pyright: ignore reportPrivateUsage=none + ) + elif file_exists: + file.link_file_to_brain(brain_id) + return create_response( + f"✅ {using_file_name} has been uploaded to brain {brain.name}.", # pyright: ignore reportPrivateUsage=none + "success", + ) + + if file.file_extension in file_processors: + try: + result = file_processors[file.file_extension]( + file=file, + brain_id=brain_id, + original_file_name=original_file_name, + ) + if result is 
None or result == 0:
+                return create_response(
+                    f"⚠️ {using_file_name} has been uploaded to the brain, but there might have been an error while reading it. Please make sure the file is not ill-formed or just an image",  # pyright: ignore reportPrivateUsage=none
+                    "warning",
+                )
+            return create_response(
+                f"✅ {using_file_name} has been uploaded to brain {brain.name} in {result} chunks",  # pyright: ignore reportPrivateUsage=none
+                "success",
+            )
+        except Exception as e:
+            # Add more specific exceptions as needed.
+            print(f"Error processing file: {e}")
+            raise
+
+    return create_response(
+        f"❌ {using_file_name} is not supported.",  # pyright: ignore reportPrivateUsage=none
+        "error",
+    )
diff --git a/backend/core/quivr_core/api/packages/quivr_core/__init__.py b/backend/core/quivr_core/api/packages/quivr_core/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/backend/core/quivr_core/api/packages/quivr_core/config.py b/backend/core/quivr_core/api/packages/quivr_core/config.py
new file mode 100644
index 000000000000..1f6c5619b661
--- /dev/null
+++ b/backend/core/quivr_core/api/packages/quivr_core/config.py
@@ -0,0 +1,23 @@
+from pydantic import BaseModel, field_validator
+
+
+class RAGConfig(BaseModel):
+    model: str = "gpt-3.5-turbo-0125"  # pyright: ignore reportPrivateUsage=none
+    temperature: float | None = 0.1
+    max_tokens: int | None = 2000
+    max_input: int = 2000
+    streaming: bool = False
+    max_files: int = 20
+    prompt: str | None = None
+
+    @field_validator("temperature", mode="before")
+    def set_default_temperature(cls, v):
+        if v is None:
+            return 0.1
+        return v
+
+    @field_validator("max_tokens", mode="before")
+    def set_default_max_tokens(cls, v):
+        if v is None:
+            return 2000
+        return v
diff --git a/backend/core/quivr_core/api/packages/quivr_core/models.py b/backend/core/quivr_core/api/packages/quivr_core/models.py
new file mode 100644
index 000000000000..56ce0185f5cd
--- /dev/null
+++ b/backend/core/quivr_core/api/packages/quivr_core/models.py
@@ -0,0 +1,58 @@
+from typing import Any
+
+from langchain_core.pydantic_v1 import BaseModel as BaseModelV1
+from langchain_core.pydantic_v1 import Field as FieldV1
+from pydantic import BaseModel
+from typing_extensions import TypedDict
+
+
+class cited_answer(BaseModelV1):
+    """Answer the user question based only on the given sources, and cite the sources used."""
+
+    answer: str = FieldV1(
+        ...,
+        description="The answer to the user question, which is based only on the given sources.",
+    )
+    thoughts: str = FieldV1(
+        ...,
+        description="""Description of the thought process, based only on the given sources.
+        Cite the text as much as possible and give the document name it appears in, in the format: 'Doc_name states: cited_text'. Be as
+        procedural as possible. Write all the steps needed to find the answer until you find it.""",
+    )
+    citations: list[int] = FieldV1(
+        ...,
+        description="The integer IDs of the SPECIFIC sources which justify the answer.",
+    )
+
+    followup_questions: list[str] = FieldV1(
+        ...,
+        description="Generate up to 3 follow-up questions that could be asked based on the answer given or context provided.",
+    )
+
+
+class RawRAGChunkResponse(TypedDict):
+    answer: dict[str, Any]
+    docs: dict[str, Any]
+
+
+class RawRAGResponse(TypedDict):
+    answer: dict[str, Any]
+    docs: dict[str, Any]
+
+
+class RAGResponseMetadata(BaseModel):
+    citations: list[int] | None = None
+    thoughts: str | list[str] | None = None
+    followup_questions: list[str] | None = None
+    sources: list[Any] | None = None
+
+
+class ParsedRAGResponse(BaseModel):
+    answer: str
+    metadata: RAGResponseMetadata | None = None
+
+
+class ParsedRAGChunkResponse(BaseModel):
+    answer: str
+    metadata: RAGResponseMetadata
+    last_chunk: bool = False
diff --git a/backend/core/quivr_core/api/packages/quivr_core/prompts.py b/backend/core/quivr_core/api/packages/quivr_core/prompts.py
new file mode 100644
index 000000000000..d0fb80cca13c
--- /dev/null
+++ b/backend/core/quivr_core/api/packages/quivr_core/prompts.py
@@ -0,0 +1,57 @@
+import datetime
+
+from langchain.prompts import HumanMessagePromptTemplate, SystemMessagePromptTemplate
+from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
+
+# First step is to create the Rephrasing Prompt
+_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language. Keep as many details as possible from previous messages. Keep all entity names.
+
+Chat History:
+{chat_history}
+Follow Up Input: {question}
+Standalone question:"""
+CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
+
+# Next is the answering prompt
+
+template_answer = """
+Context:
+{context}
+
+User Question: {question}
+Answer:
+"""
+
+today_date = datetime.datetime.now().strftime("%B %d, %Y")
+
+system_message_template = (
+    f"Your name is Quivr. You're a helpful assistant. Today's date is {today_date}."
+)
+
+system_message_template += """
+When answering use markdown.
+Use markdown code blocks for code snippets.
+Answer in a concise and clear manner.
+Use the following pieces of context from files provided by the user to answer the user's question.
+Answer in the same language as the user question.
+If you don't know the answer with the context provided from the files, just say that you don't know, don't try to make up an answer.
+Don't cite the source id in the answer objects, but you can use the source to answer the question.
+You have access to the files to answer the user question (limited to first 20 files):
+{files}
+
+If not None, user instructions to follow when answering: {custom_instructions}
+""" + + +ANSWER_PROMPT = ChatPromptTemplate.from_messages( + [ + SystemMessagePromptTemplate.from_template(system_message_template), + HumanMessagePromptTemplate.from_template(template_answer), + ] +) + + +# How we format documents +DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template( + template="Source: {index} \n {page_content}" +) diff --git a/backend/core/quivr_core/api/packages/quivr_core/quivr_rag.py b/backend/core/quivr_core/api/packages/quivr_core/quivr_rag.py new file mode 100644 index 000000000000..77e2e9e5daaa --- /dev/null +++ b/backend/core/quivr_core/api/packages/quivr_core/quivr_rag.py @@ -0,0 +1,232 @@ +import logging +import os +from operator import itemgetter +from typing import AsyncGenerator + +from flashrank import Ranker +from langchain.retrievers import ContextualCompressionRetriever +from langchain.retrievers.document_compressors.flashrank_rerank import FlashrankRerank +from langchain_cohere import CohereRerank +from langchain_community.chat_models import ChatLiteLLM +from langchain_core.messages.ai import AIMessageChunk +from langchain_core.output_parsers import StrOutputParser +from langchain_core.runnables import RunnableLambda, RunnablePassthrough +from langchain_core.vectorstores import VectorStore +from langchain_openai import ChatOpenAI + +from quivr_core.modules.knowledge.entity.knowledge import Knowledge +from quivr_core.packages.quivr_core.config import RAGConfig +from quivr_core.packages.quivr_core.models import ( + ParsedRAGChunkResponse, + ParsedRAGResponse, + cited_answer, +) +from quivr_core.packages.quivr_core.prompts import ( + ANSWER_PROMPT, + CONDENSE_QUESTION_PROMPT, +) +from quivr_core.packages.quivr_core.utils import ( + combine_documents, + format_file_list, + get_chunk_metadata, + model_supports_function_calling, + parse_chunk_response, + parse_response, +) + +logger = logging.getLogger(__name__) + + +class QuivrQARAG: + def __init__( + self, + *, + rag_config: RAGConfig, + llm: ChatLiteLLM, + vector_store: VectorStore, + ): + self.rag_config = rag_config + self.vector_store = vector_store + self.llm = llm + self.reranker = self._create_reranker() + self.supports_func_calling = model_supports_function_calling( + self.rag_config.model + ) + + @property + def retriever(self): + return self.vector_store.as_retriever() + + def _create_reranker(self): + # TODO: reranker config + if os.getenv("COHERE_API_KEY"): + compressor = CohereRerank(top_n=20) + else: + ranker_model_name = "ms-marco-TinyBERT-L-2-v2" + flashrank_client = Ranker(model_name=ranker_model_name) + compressor = FlashrankRerank( + client=flashrank_client, model=ranker_model_name, top_n=20 + ) + return compressor + + # TODO : refactor and simplify + def filter_history( + self, chat_history, max_history: int = 10, max_tokens: int = 2000 + ): + """ + Filter out the chat history to only include the messages that are relevant to the current question + + Takes in a chat_history= [HumanMessage(content='Qui est Chloé ? 
'), AIMessage(content="Chloé est une salariée travaillant pour l'entreprise Quivr en tant qu'AI Engineer, sous la direction de son supérieur hiérarchique, Stanislas Girard."), HumanMessage(content='Dis moi en plus sur elle'), AIMessage(content=''), HumanMessage(content='Dis moi en plus sur elle'), AIMessage(content="Désolé, je n'ai pas d'autres informations sur Chloé à partir des fichiers fournis.")] + Returns a filtered chat_history with in priority: first max_tokens, then max_history where a Human message and an AI message count as one pair + a token is 4 characters + """ + chat_history = chat_history[::-1] + total_tokens = 0 + total_pairs = 0 + filtered_chat_history = [] + for i in range(0, len(chat_history), 2): + if i + 1 < len(chat_history): + human_message = chat_history[i] + ai_message = chat_history[i + 1] + message_tokens = ( + len(human_message.content) + len(ai_message.content) + ) // 4 + if ( + total_tokens + message_tokens > max_tokens + or total_pairs >= max_history + ): + break + filtered_chat_history.append(human_message) + filtered_chat_history.append(ai_message) + total_tokens += message_tokens + total_pairs += 1 + chat_history = filtered_chat_history[::-1] + + return chat_history + + def build_chain(self, files: str): + compression_retriever = ContextualCompressionRetriever( + base_compressor=self.reranker, base_retriever=self.retriever + ) + + loaded_memory = RunnablePassthrough.assign( + chat_history=RunnableLambda( + lambda x: self.filter_history(x["chat_history"]), + ), + question=lambda x: x["question"], + ) + + standalone_question = { + "standalone_question": { + "question": lambda x: x["question"], + "chat_history": itemgetter("chat_history"), + } + | CONDENSE_QUESTION_PROMPT + | self.llm + | StrOutputParser(), + } + + # Now we retrieve the documents + retrieved_documents = { + "docs": itemgetter("standalone_question") | compression_retriever, + "question": lambda x: x["standalone_question"], + "custom_instructions": lambda x: self.rag_config.prompt, + } + + final_inputs = { + "context": lambda x: combine_documents(x["docs"]), + "question": itemgetter("question"), + "custom_instructions": itemgetter("custom_instructions"), + "files": lambda _: files, # TODO: shouldn't be here + } + + # Override llm if we have a OpenAI model + llm = self.llm + if self.supports_func_calling: + if self.rag_config.temperature: + llm_function = ChatOpenAI( + max_tokens=self.rag_config.max_tokens, + model=self.rag_config.model, + temperature=self.rag_config.temperature, + ) + else: + llm_function = ChatOpenAI( + max_tokens=self.rag_config.max_tokens, + model=self.rag_config.model, + ) + + llm = llm_function.bind_tools( + [cited_answer], + tool_choice="any", + ) + + answer = { + "answer": final_inputs | ANSWER_PROMPT | llm, + "docs": itemgetter("docs"), + } + + return loaded_memory | standalone_question | retrieved_documents | answer + + def answer( + self, + question: str, + history: list[dict[str, str]], + list_files: list[Knowledge], + metadata: dict[str, str] = {}, + ) -> ParsedRAGResponse: + concat_list_files = format_file_list(list_files, self.rag_config.max_files) + conversational_qa_chain = self.build_chain(concat_list_files) + raw_llm_response = conversational_qa_chain.invoke( + { + "question": question, + "chat_history": history, + "custom_instructions": (self.rag_config.prompt), + }, + config={"metadata": metadata}, + ) + response = parse_response(raw_llm_response, self.rag_config.model) + return response + + async def answer_astream( + self, + question: str, + history: 
list[dict[str, str]],
+        list_files: list[Knowledge],
+        metadata: dict[str, str] = {},
+    ) -> AsyncGenerator[ParsedRAGChunkResponse, ParsedRAGChunkResponse]:
+        concat_list_files = format_file_list(list_files, self.rag_config.max_files)
+        conversational_qa_chain = self.build_chain(concat_list_files)
+
+        rolling_message = AIMessageChunk(content="")
+        sources = []
+
+        async for chunk in conversational_qa_chain.astream(
+            {
+                "question": question,
+                "chat_history": history,
+                "custom_instructions": (self.rag_config.prompt),
+            },
+            config={"metadata": metadata},
+        ):
+            # Could receive this anywhere so we need to save it for the last chunk
+            if "docs" in chunk:
+                sources = chunk["docs"] if "docs" in chunk else []
+
+            if "answer" in chunk:
+                rolling_message, parsed_chunk = parse_chunk_response(
+                    rolling_message,
+                    chunk,
+                    self.supports_func_calling,
+                )
+
+                if self.supports_func_calling and len(parsed_chunk.answer) > 0:
+                    yield parsed_chunk
+                else:
+                    yield parsed_chunk
+
+        # The last chunk provides the sources and the full metadata
+        yield ParsedRAGChunkResponse(
+            answer="",
+            metadata=get_chunk_metadata(rolling_message, sources),
+            last_chunk=True,
+        )
diff --git a/backend/core/quivr_core/api/packages/quivr_core/rag_factory.py b/backend/core/quivr_core/api/packages/quivr_core/rag_factory.py
new file mode 100644
index 000000000000..49d998521304
--- /dev/null
+++ b/backend/core/quivr_core/api/packages/quivr_core/rag_factory.py
@@ -0,0 +1,32 @@
+from typing import Type
+
+from quivr_core.logger import get_logger
+from quivr_core.modules.brain.entity.integration_brain import IntegrationEntity
+from quivr_core.modules.brain.integrations.Big.Brain import BigBrain
+from quivr_core.modules.brain.integrations.GPT4.Brain import GPT4Brain
+from quivr_core.modules.brain.integrations.Multi_Contract.Brain import (
+    MultiContractBrain,
+)
+from quivr_core.modules.brain.integrations.Notion.Brain import NotionBrain
+from quivr_core.modules.brain.integrations.Proxy.Brain import ProxyBrain
+from quivr_core.modules.brain.integrations.Self.Brain import SelfBrain
+from quivr_core.modules.brain.integrations.SQL.Brain import SQLBrain
+from quivr_core.modules.brain.knowledge_brain_qa import KnowledgeBrainQA
+
+logger = get_logger(__name__)
+
+
+class RAGServiceFactory:
+    integration_list: dict[str, Type[KnowledgeBrainQA]] = {
+        "notion": NotionBrain,
+        "gpt4": GPT4Brain,
+        "sql": SQLBrain,
+        "big": BigBrain,
+        "doc": KnowledgeBrainQA,
+        "proxy": ProxyBrain,
+        "self": SelfBrain,
+        "multi-contract": MultiContractBrain,
+    }
+
+    def get_brain_cls(self, integration: IntegrationEntity):
+        pass
diff --git a/backend/core/quivr_core/api/packages/quivr_core/rag_service.py b/backend/core/quivr_core/api/packages/quivr_core/rag_service.py
new file mode 100644
index 000000000000..10cbfd0b65e8
--- /dev/null
+++ b/backend/core/quivr_core/api/packages/quivr_core/rag_service.py
@@ -0,0 +1,296 @@
+import datetime
+from uuid import UUID, uuid4
+
+from langchain_community.chat_models import ChatLiteLLM
+
+from quivr_core.logger import get_logger
+from quivr_core.models.settings import (
+    get_embedding_client,
+    get_supabase_client,
+    settings,
+)
+from quivr_core.modules.brain.entity.brain_entity import BrainEntity
+from quivr_core.modules.brain.service.brain_service import BrainService
+from quivr_core.modules.brain.service.utils.format_chat_history import (
+    format_chat_history,
+)
+from quivr_core.modules.chat.controller.chat.utils import (
+    compute_cost,
+    find_model_and_generate_metadata,
+    update_user_usage,
+)
+from quivr_core.modules.chat.dto.inputs import CreateChatHistory
+from quivr_core.modules.chat.dto.outputs import GetChatHistoryOutput
+from quivr_core.modules.chat.service.chat_service import ChatService
+from quivr_core.modules.knowledge.repository.knowledges import KnowledgeRepository
+from quivr_core.modules.prompt.entity.prompt import Prompt
+from quivr_core.modules.prompt.service.prompt_service import PromptService
+from quivr_core.modules.user.entity.user_identity import UserIdentity
+from quivr_core.modules.user.service.user_usage import UserUsage
+from quivr_core.packages.quivr_core.config import RAGConfig
+from quivr_core.packages.quivr_core.models import ParsedRAGResponse, RAGResponseMetadata
+from quivr_core.packages.quivr_core.quivr_rag import QuivrQARAG
+from quivr_core.packages.quivr_core.utils import generate_source
+from quivr_core.vectorstore.supabase import CustomSupabaseVectorStore
+
+logger = get_logger(__name__)
+
+
+class RAGService:
+    def __init__(
+        self,
+        current_user: UserIdentity,
+        brain_id: UUID | None,
+        chat_id: UUID,
+        brain_service: BrainService,
+        prompt_service: PromptService,
+        chat_service: ChatService,
+        knowledge_service: KnowledgeRepository,
+    ):
+        # Services
+        self.brain_service = brain_service
+        self.prompt_service = prompt_service
+        self.chat_service = chat_service
+        self.knowledge_service = knowledge_service
+
+        # Base models
+        self.current_user = current_user
+        self.chat_id = chat_id
+        self.brain = self.get_or_create_brain(brain_id, self.current_user.id)
+        self.prompt = self.get_brain_prompt(self.brain)
+
+        # Check at init time
+        self.model_to_use = self.check_and_update_user_usage(
+            self.current_user, self.brain
+        )
+
+    def get_brain_prompt(self, brain: BrainEntity) -> Prompt | None:
+        return (
+            self.prompt_service.get_prompt_by_id(brain.prompt_id)
+            if brain.prompt_id
+            else None
+        )
+
+    def get_llm(self, rag_config: RAGConfig):
+        api_base = (
+            settings.ollama_api_base_url
+            if settings.ollama_api_base_url and rag_config.model.startswith("ollama")
+            else None
+        )
+        return ChatLiteLLM(
+            temperature=rag_config.temperature,
+            max_tokens=rag_config.max_tokens,
+            model=rag_config.model,
+            streaming=rag_config.streaming,
+            verbose=False,
+            api_base=api_base,
+        )  # pyright: ignore reportPrivateUsage=none
+
+    def get_or_create_brain(self, brain_id: UUID | None, user_id: UUID) -> BrainEntity:
+        brain = None
+        if brain_id is not None:
+            brain = self.brain_service.get_brain_details(brain_id, user_id)
+
+        # TODO: Create if doesn't exist
+        assert brain
+
+        if brain.integration:
+            assert brain.integration.user_id == user_id
+        return brain
+
+    def check_and_update_user_usage(self, user: UserIdentity, brain: BrainEntity):
+        """Check the user's limits and raise if the user has reached them:
+        1. Raise if one of these conditions holds:
+            - The user doesn't have access to brains
+            - The brain's model is not in user_settings.models
+            - Latest sum_30d(user_daily_user) < user_settings.max_monthly_usage
+            - Check that sum(user_settings.daily_user_count) + model_price < user_settings.monthly_chat_credits
+        2. Update the user's usage.
+        """
+        # TODO(@aminediro): this is bug-prone, should retrieve it from the DB here
+        user_usage = UserUsage(id=user.id, email=user.email)
+        user_settings = user_usage.get_user_settings()
+        all_models = user_usage.get_models()
+
+        # TODO(@aminediro): refactor this function
+        model_to_use = find_model_and_generate_metadata(
+            brain.model,
+            user_settings,
+            all_models,
+        )
+        cost = compute_cost(model_to_use, all_models)
+        # Raises HTTP if user usage exceeds limits
+        update_user_usage(user_usage, user_settings, cost)  # noqa: F821
+        return model_to_use
+
+    def create_vector_store(
+        self, brain_id: UUID, max_input: int
+    ) -> CustomSupabaseVectorStore:
+        supabase_client = get_supabase_client()
+        embeddings = get_embedding_client()
+        return CustomSupabaseVectorStore(
+            supabase_client,
+            embeddings,
+            table_name="vectors",
+            brain_id=brain_id,
+            max_input=max_input,
+        )
+
+    def save_answer(self, question: str, answer: ParsedRAGResponse):
+        return self.chat_service.update_chat_history(
+            CreateChatHistory(
+                **{
+                    "chat_id": self.chat_id,
+                    "user_message": question,
+                    "assistant": answer.answer,
+                    "brain_id": self.brain.brain_id,
+                    # TODO: prompt_id should always be not None
+                    "prompt_id": self.prompt.id if self.prompt else None,
+                    "metadata": answer.metadata.model_dump() if answer.metadata else {},
+                }
+            )
+        )
+
+    async def generate_answer(
+        self,
+        question: str,
+    ):
+        logger.info(
+            f"Creating question for chat {self.chat_id} with brain {self.brain.brain_id}"
+        )
+        rag_config = RAGConfig(
+            model=self.model_to_use.name,
+            temperature=self.brain.temperature,
+            max_input=self.model_to_use.max_input,
+            max_tokens=self.brain.max_tokens,
+            prompt=self.prompt.content if self.prompt else None,
+            streaming=False,
+        )
+        history = await self.chat_service.get_chat_history(self.chat_id)
+        # Get list of files
+        list_files = self.knowledge_service.get_all_knowledge_in_brain(
+            self.brain.brain_id
+        )
+        # Build RAG dependencies to inject
+        vector_store = self.create_vector_store(
+            self.brain.brain_id, rag_config.max_input
+        )
+        llm = self.get_llm(rag_config)
+        # Initialize the RAG pipeline
+        rag_pipeline = QuivrQARAG(
+            rag_config=rag_config, llm=llm, vector_store=vector_store
+        )
+        # Format the history, sanitize the input
+        transformed_history = format_chat_history(history)
+
+        parsed_response = rag_pipeline.answer(question, transformed_history, list_files)
+
+        # Save the answer to db
+        new_chat_entry = self.save_answer(question, parsed_response)
+
+        # Format output to be correct
+        return GetChatHistoryOutput(
+            **{
+                "chat_id": self.chat_id,
+                "user_message": question,
+                "assistant": parsed_response.answer,
+                "message_time": new_chat_entry.message_time,
+                "prompt_title": (self.prompt.title if self.prompt else None),
+                "brain_name": self.brain.name if self.brain else None,
+                "message_id": new_chat_entry.message_id,
+                "brain_id": str(self.brain.brain_id) if self.brain else None,
+                "metadata": (
+                    parsed_response.metadata.model_dump()
+                    if parsed_response.metadata
+                    else {}
+                ),
+            }
+        )
+
+    async def generate_answer_stream(
+        self,
+        question: str,
+    ):
+        logger.info(
+            f"Creating question for chat {self.chat_id} with brain {self.brain.brain_id}"
+        )
+        # Build the RAG config
+        rag_config = RAGConfig(
+            model=self.model_to_use.name,
+            temperature=self.brain.temperature,
+            max_input=self.model_to_use.max_input,
+            max_tokens=self.brain.max_tokens,
+            prompt=self.prompt.content if self.prompt else "",
+            streaming=True,
+        )
+        # Get the chat history
+        history = await self.chat_service.get_chat_history(self.chat_id)
+        # Format the history, sanitize the input
+        transformed_history = format_chat_history(history)
+
+        # Get the list of file URLs
+        # TODO: Why do we get ALL the files?
+        list_files = self.knowledge_service.get_all_knowledge_in_brain(
+            self.brain.brain_id
+        )
+        llm = self.get_llm(rag_config)
+        vector_store = self.create_vector_store(
+            self.brain.brain_id, rag_config.max_input
+        )
+        # Initialize the RAG pipeline
+        rag_pipeline = QuivrQARAG(
+            rag_config=rag_config, llm=llm, vector_store=vector_store
+        )
+
+        full_answer = ""
+
+        message_metadata = {
+            "chat_id": self.chat_id,
+            "message_id": uuid4(),  # do we need it?
+            "user_message": question,  # TODO: define result
+            "message_time": datetime.datetime.now(),  # TODO: define result
+            "prompt_title": (self.prompt.title if self.prompt else ""),
+            "brain_name": self.brain.name if self.brain else None,
+            "brain_id": self.brain.brain_id if self.brain else None,
+        }
+
+        async for response in rag_pipeline.answer_astream(
+            question, transformed_history, list_files
+        ):
+            # Format output to be correct
+            if not response.last_chunk:
+                streamed_chat_history = GetChatHistoryOutput(
+                    assistant=response.answer,
+                    metadata=response.metadata.model_dump(),
+                    **message_metadata,
+                )
+                full_answer += response.answer
+                yield f"data: {streamed_chat_history.model_dump_json()}"
+
+        # For the last chunk, parse the sources and the full answer
+        streamed_chat_history = GetChatHistoryOutput(
+            assistant=response.answer,
+            metadata=response.metadata.model_dump(),
+            **message_metadata,
+        )
+
+        sources_urls = generate_source(
+            response.metadata.sources,
+            self.brain.brain_id,
+            (
+                streamed_chat_history.metadata["citations"]
+                if streamed_chat_history.metadata
+                else None
+            ),
+        )
+        if streamed_chat_history.metadata:
+            streamed_chat_history.metadata["sources"] = sources_urls
+
+        self.save_answer(
+            question,
+            ParsedRAGResponse(
+                answer=full_answer,
+                metadata=RAGResponseMetadata(**streamed_chat_history.metadata),
+            ),
+        )
+        yield f"data: {streamed_chat_history.model_dump_json()}"
diff --git a/backend/core/quivr_core/api/packages/quivr_core/utils.py b/backend/core/quivr_core/api/packages/quivr_core/utils.py
new file mode 100644
index 000000000000..8bff2ae9bcaa
--- /dev/null
+++ b/backend/core/quivr_core/api/packages/quivr_core/utils.py
@@ -0,0 +1,272 @@
+import logging
+from typing import Any, Dict, List, Tuple
+from uuid import UUID
+
+from langchain.schema import (
+    AIMessage,
+    BaseMessage,
+    HumanMessage,
+    SystemMessage,
+    format_document,
+)
+from langchain_core.messages.ai import AIMessageChunk
+
+from quivr_core.modules.chat.dto.chats import Sources
+from quivr_core.modules.chat.dto.outputs import GetChatHistoryOutput
+from quivr_core.modules.knowledge.entity.knowledge import Knowledge
+from quivr_core.modules.upload.service.generate_file_signed_url import (
+    generate_file_signed_url,
+)
+from quivr_core.packages.quivr_core.models import (
+    ParsedRAGChunkResponse,
+    ParsedRAGResponse,
+    RAGResponseMetadata,
+    RawRAGResponse,
+)
+from quivr_core.packages.quivr_core.prompts import DEFAULT_DOCUMENT_PROMPT
+
+# TODO(@aminediro): define a types package where we clearly define IO types
+# This should be used for serialization/deserialization later
+
+
+logger = logging.getLogger(__name__)
+
+
+def model_supports_function_calling(model_name: str):
+    models_supporting_function_calls = [
+        "gpt-4",
+        "gpt-4-1106-preview",
+        "gpt-4-0613",
+        "gpt-3.5-turbo-0125",
+        "gpt-3.5-turbo-1106",
+        "gpt-3.5-turbo-0613",
+        
"gpt-4-0125-preview", + "gpt-3.5-turbo", + "gpt-4-turbo", + "gpt-4o", + ] + return model_name in models_supporting_function_calls + + +def format_chat_history( + history: List[GetChatHistoryOutput], +) -> List[Dict[str, str]]: + """Format the chat history into a list of HumanMessage and AIMessage""" + formatted_history = [] + for chat in history: + if chat.user_message: + formatted_history.append(HumanMessage(content=chat.user_message)) + if chat.assistant: + formatted_history.append(AIMessage(content=chat.assistant)) + return formatted_history + + +def format_history_to_openai_mesages( + tuple_history: List[Tuple[str, str]], system_message: str, question: str +) -> List[BaseMessage]: + """Format the chat history into a list of Base Messages""" + messages = [] + messages.append(SystemMessage(content=system_message)) + for human, ai in tuple_history: + messages.append(HumanMessage(content=human)) + messages.append(AIMessage(content=ai)) + messages.append(HumanMessage(content=question)) + return messages + + +def cited_answer_filter(tool): + return tool["name"] == "cited_answer" + + +def get_prev_message_str(msg: AIMessageChunk) -> str: + if msg.tool_calls: + cited_answer = next(x for x in msg.tool_calls if cited_answer_filter(x)) + if "args" in cited_answer and "answer" in cited_answer["args"]: + return cited_answer["args"]["answer"] + return "" + + +def get_chunk_metadata( + msg: AIMessageChunk, sources: list[Any] = [] +) -> RAGResponseMetadata: + # Initiate the source + metadata = {"sources": sources} + if msg.tool_calls: + cited_answer = next(x for x in msg.tool_calls if cited_answer_filter(x)) + + if "args" in cited_answer: + gathered_args = cited_answer["args"] + if "citations" in gathered_args: + citations = gathered_args["citations"] + metadata["citations"] = citations + + if "followup_questions" in gathered_args: + followup_questions = gathered_args["followup_questions"] + metadata["followup_questions"] = followup_questions + + if "thoughts" in gathered_args: + thoughts = gathered_args["thoughts"] + metadata["thoughts"] = thoughts + + return RAGResponseMetadata(**metadata) + + +# TODO: CONVOLUTED LOGIC ! 
+# TODO(@aminediro): redo this +def parse_chunk_response( + gathered_msg: AIMessageChunk, + raw_chunk: dict[str, Any], + supports_func_calling: bool, +) -> Tuple[AIMessageChunk, ParsedRAGChunkResponse]: + # Init with sources + answer_str = "" + # Get the previously parsed answer + prev_answer = get_prev_message_str(gathered_msg) + + if supports_func_calling: + gathered_msg += raw_chunk["answer"] + if gathered_msg.tool_calls: + cited_answer = next( + x for x in gathered_msg.tool_calls if cited_answer_filter(x) + ) + if "args" in cited_answer: + gathered_args = cited_answer["args"] + if "answer" in gathered_args: + # Only send the difference between answer and response_tokens which was the previous answer + gathered_answer = gathered_args["answer"] + answer_str: str = gathered_answer[len(prev_answer) :] + + return gathered_msg, ParsedRAGChunkResponse( + answer=answer_str, metadata=RAGResponseMetadata() + ) + else: + return gathered_msg, ParsedRAGChunkResponse( + answer=raw_chunk["answer"].content, metadata=RAGResponseMetadata() + ) + + +def parse_response(raw_response: RawRAGResponse, model_name: str) -> ParsedRAGResponse: + answer = raw_response["answer"].content + sources = raw_response["docs"] or [] + + metadata = {"sources": sources} + + if model_supports_function_calling(model_name): + if raw_response["answer"].tool_calls: + citations = raw_response["answer"].tool_calls[-1]["args"]["citations"] + metadata["citations"] = citations + followup_questions = raw_response["answer"].tool_calls[-1]["args"][ + "followup_questions" + ] + thoughts = raw_response["answer"].tool_calls[-1]["args"]["thoughts"] + if followup_questions: + metadata["followup_questions"] = followup_questions + if thoughts: + metadata["thoughts"] = thoughts + answer = raw_response["answer"].tool_calls[-1]["args"]["answer"] + + parsed_response = ParsedRAGResponse( + answer=answer, metadata=RAGResponseMetadata(**metadata) + ) + return parsed_response + + +def combine_documents( + docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n" +): + # for each docs, add an index in the metadata to be able to cite the sources + for doc, index in zip(docs, range(len(docs))): + doc.metadata["index"] = index + doc_strings = [format_document(doc, document_prompt) for doc in docs] + return document_separator.join(doc_strings) + + +def format_file_list(list_files_array: list[Knowledge], max_files: int = 20) -> str: + list_files = [file.file_name or file.url for file in list_files_array] + files: list[str] = list(filter(lambda n: n is not None, list_files)) # type: ignore + files = files[:max_files] + + files_str = "\n".join(files) if list_files_array else "None" + return files_str + + +# TODO: REFACTOR THIS, it does call the DB , so maybe in a service +def generate_source( + source_documents: List[Any] | None, + brain_id: UUID, + citations: List[int] | None = None, +) -> List[Sources]: + """ + Generate the sources list for the answer + It takes in a list of sources documents and citations that points to the docs index that was used in the answer + """ + # Initialize an empty list for sources + sources_list: List[Sources] = [] + + # Initialize a dictionary for storing generated URLs + generated_urls = {} + + # remove duplicate sources with same name and create a list of unique sources + sources_url_cache = {} + + # Get source documents from the result, default to an empty list if not found + # If source documents exist + if source_documents: + logger.info(f"Citations {citations}") + for index, doc in 
enumerate(source_documents): + logger.info(f"Processing source document {doc.metadata['file_name']}") + if citations is not None: + if index not in citations: + logger.info(f"Skipping source document {doc.metadata['file_name']}") + continue + # Check if 'url' is in the document metadata + is_url = ( + "original_file_name" in doc.metadata + and doc.metadata["original_file_name"] is not None + and doc.metadata["original_file_name"].startswith("http") + ) + + # Determine the name based on whether it's a URL or a file + name = ( + doc.metadata["original_file_name"] + if is_url + else doc.metadata["file_name"] + ) + + # Determine the type based on whether it's a URL or a file + type_ = "url" if is_url else "file" + + # Determine the source URL based on whether it's a URL or a file + if is_url: + source_url = doc.metadata["original_file_name"] + else: + file_path = f"{brain_id}/{doc.metadata['file_name']}" + # Check if the URL has already been generated + if file_path in generated_urls: + source_url = generated_urls[file_path] + else: + # Generate the URL + if file_path in sources_url_cache: + source_url = sources_url_cache[file_path] + else: + generated_url = generate_file_signed_url(file_path) + if generated_url is not None: + source_url = generated_url.get("signedURL", "") + else: + source_url = "" + # Store the generated URL + generated_urls[file_path] = source_url + + # Append a new Sources object to the list + sources_list.append( + Sources( + name=name, + type=type_, + source_url=source_url, + original_file_name=name, + citation=doc.page_content, + ) + ) + else: + logger.info("No source documents found or source_documents is not a list.") + return sources_list diff --git a/backend/core/quivr_core/api/packages/utils/__init__.py b/backend/core/quivr_core/api/packages/utils/__init__.py new file mode 100644 index 000000000000..c9e648c9004c --- /dev/null +++ b/backend/core/quivr_core/api/packages/utils/__init__.py @@ -0,0 +1,2 @@ +from .handle_request_validation_error import handle_request_validation_error +from .parse_message_time import parse_message_time diff --git a/backend/core/quivr_core/api/packages/utils/handle_request_validation_error.py b/backend/core/quivr_core/api/packages/utils/handle_request_validation_error.py new file mode 100644 index 000000000000..57e2d0ad06f8 --- /dev/null +++ b/backend/core/quivr_core/api/packages/utils/handle_request_validation_error.py @@ -0,0 +1,24 @@ +from fastapi import FastAPI, Request, status +from fastapi.exceptions import RequestValidationError +from fastapi.responses import JSONResponse + +from quivr_core.logger import get_logger + +logger = get_logger(__name__) + + +def handle_request_validation_error(app: FastAPI): + @app.exception_handler(RequestValidationError) + async def validation_exception_handler( + request: Request, exc: RequestValidationError + ): + exc_str = f"{exc}".replace("\n", " ").replace(" ", " ") + logger.error(request, exc_str) + content = { + "status_code": status.HTTP_422_UNPROCESSABLE_ENTITY, + "message": exc_str, + "data": None, + } + return JSONResponse( + content=content, status_code=status.HTTP_422_UNPROCESSABLE_ENTITY + ) diff --git a/backend/core/quivr_core/api/packages/utils/parse_message_time.py b/backend/core/quivr_core/api/packages/utils/parse_message_time.py new file mode 100644 index 000000000000..4aa18a3b8759 --- /dev/null +++ b/backend/core/quivr_core/api/packages/utils/parse_message_time.py @@ -0,0 +1,5 @@ +from datetime import datetime + + +def parse_message_time(message_time_str: str): + return 
datetime.strptime(message_time_str, "%Y-%m-%dT%H:%M:%S.%f")
diff --git a/backend/core/quivr_core/api/packages/utils/telemetry.py b/backend/core/quivr_core/api/packages/utils/telemetry.py
new file mode 100644
index 000000000000..a644d646e4f0
--- /dev/null
+++ b/backend/core/quivr_core/api/packages/utils/telemetry.py
@@ -0,0 +1,60 @@
+import hashlib
+import json
+import os
+import threading
+
+import httpx
+from fastapi import Request
+
+from quivr_core.logger import get_logger
+
+logger = get_logger(__name__)
+
+# Assume these are your Supabase Function endpoint and any necessary headers
+TELEMETRY_URL = "https://ovbvcnwemowuuuaebizd.supabase.co/functions/v1/telemetry"
+HEADERS = {
+    "Content-Type": "application/json",
+}
+
+
+def generate_machine_key():
+    # Get the OpenAI API key from the environment variables.
+    # Default to an empty seed so the hashing below cannot fail when it is unset.
+    seed = os.getenv("OPENAI_API_KEY", "")
+
+    # Use a SHA-256 hash to generate a unique key from the seed
+    unique_key = hashlib.sha256(seed.encode()).hexdigest()
+
+    return unique_key
+
+
+def send_telemetry(event_name: str, event_data: dict, request: Request | None = None):
+    # Generate a unique machine key
+    machine_key = generate_machine_key()
+    domain = None
+    if request:
+        domain = request.url.hostname
+        logger.info(f"Domain: {domain}")
+        event_data = {**event_data, "domain": domain}
+    # Prepare the payload
+    payload = json.dumps(
+        {
+            "anonymous_identifier": machine_key,
+            "event_name": event_name,
+            "event_data": event_data,
+        }
+    )
+
+    # Send the telemetry data
+    with httpx.Client() as client:
+        _ = client.post(TELEMETRY_URL, headers=HEADERS, data=payload)
+
+
+def maybe_send_telemetry(
+    event_name: str, event_data: dict, request: Request | None = None
+):
+    enable_telemetry = os.getenv("TELEMETRY_ENABLED", "false")
+
+    if enable_telemetry.lower() != "true":
+        return
+
+    threading.Thread(
+        target=send_telemetry, args=(event_name, event_data, request)
+    ).start()
diff --git a/backend/core/quivr_core/api/tests/conftest.py b/backend/core/quivr_core/api/tests/conftest.py
new file mode 100644
index 000000000000..47621f5e72d4
--- /dev/null
+++ b/backend/core/quivr_core/api/tests/conftest.py
@@ -0,0 +1,61 @@
+import asyncio
+import os
+
+import pytest
+import pytest_asyncio
+import sqlalchemy
+from sqlalchemy.ext.asyncio import create_async_engine
+from sqlmodel import create_engine
+from sqlmodel.ext.asyncio.session import AsyncSession
+
+pg_database_url = ""
+
+
+@pytest.fixture(scope="session", autouse=True)
+def db_setup():
+    # setup
+    sync_engine = create_engine(
+        "postgresql://" + pg_database_url,
+        echo=bool(os.getenv("ORM_DEBUG")),
+    )
+    # TODO(@amine): for now don't drop anything
+    # SQLModel.metadata.create_all(sync_engine, checkfirst=True)
+    yield sync_engine
+    # teardown
+    # NOTE: For now we rely on Supabase migrations for defining schemas
+    # SQLModel.metadata.drop_all(sync_engine)
+
+
+@pytest_asyncio.fixture(scope="session")
+async def async_engine():
+    engine = create_async_engine(
+        "postgresql+asyncpg://" + pg_database_url,
+        echo=bool(os.getenv("ORM_DEBUG")),
+    )
+    yield engine
+
+
+@pytest.fixture(scope="session")
+def event_loop(request: pytest.FixtureRequest):
+    loop = asyncio.get_event_loop_policy().new_event_loop()
+    yield loop
+    loop.close()
+
+
+@pytest_asyncio.fixture()
+async def session(async_engine):
+    async with async_engine.connect() as conn:
+        await conn.begin()
+        await conn.begin_nested()
+        async_session = AsyncSession(conn, expire_on_commit=False)
+
+        @sqlalchemy.event.listens_for(
+            async_session.sync_session, "after_transaction_end"
+        )
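# NOTE: this listener implements SQLAlchemy's documented "join an external
# transaction" testing recipe (assuming that is the intent here): whenever the
# nested transaction ends, a fresh SAVEPOINT is opened immediately, so code
# under test may commit or roll back freely while the whole session is still
# discarded with the outer transaction at teardown.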
def end_savepoint(session, transaction): + if conn.closed: + return + if not conn.in_nested_transaction(): + conn.sync_connection.begin_nested() + + yield async_session From ad63ef4bf319a230933a5b1cf033dc73e0227b98 Mon Sep 17 00:00:00 2001 From: aminediro Date: Fri, 28 Jun 2024 11:04:13 +0200 Subject: [PATCH 02/20] core chat --- .../chat/controller/chat/brainful_chat.py | 113 ------------------ .../chat/controller/chat/brainless_chat.py | 27 ----- .../modules/chat/controller/chat/factory.py | 11 -- .../modules/chat/controller/chat/interface.py | 17 --- .../api/modules/chat/controller/chat/utils.py | 110 ----------------- .../modules/chat/controller/chat_routes.py | 97 ++------------- .../__init_.py => notification/__init__.py} | 0 .../controller/__init__.py} | 0 .../api/modules/notification/dto/__init__.py | 1 + .../api/modules/notification/dto/inputs.py | 26 ++++ .../api/modules/notification/dto/outputs.py | 0 .../modules/notification/entity/__init__.py | 1 + .../notification/entity/notification.py | 24 ++++ .../notification/repository/__init__.py | 1 + .../notification/repository/notifications.py | 68 +++++++++++ .../repository/notifications_interface.py | 36 ++++++ .../modules/notification/service/__init__.py | 0 .../service/notification_service.py | 34 ++++++ .../notification/tests/test_notification.py | 0 .../quivr_core/api/vectorstore/__init__.py | 0 .../quivr_core/api/vectorstore/supabase.py | 104 ++++++++++++++++ 21 files changed, 305 insertions(+), 365 deletions(-) delete mode 100644 backend/core/quivr_core/api/modules/chat/controller/chat/brainful_chat.py delete mode 100644 backend/core/quivr_core/api/modules/chat/controller/chat/brainless_chat.py delete mode 100644 backend/core/quivr_core/api/modules/chat/controller/chat/factory.py delete mode 100644 backend/core/quivr_core/api/modules/chat/controller/chat/interface.py delete mode 100644 backend/core/quivr_core/api/modules/chat/controller/chat/utils.py rename backend/core/quivr_core/api/modules/{chat/controller/chat/__init_.py => notification/__init__.py} (100%) rename backend/core/quivr_core/api/modules/{chat/controller/chat/test_utils.py => notification/controller/__init__.py} (100%) create mode 100644 backend/core/quivr_core/api/modules/notification/dto/__init__.py create mode 100644 backend/core/quivr_core/api/modules/notification/dto/inputs.py create mode 100644 backend/core/quivr_core/api/modules/notification/dto/outputs.py create mode 100644 backend/core/quivr_core/api/modules/notification/entity/__init__.py create mode 100644 backend/core/quivr_core/api/modules/notification/entity/notification.py create mode 100644 backend/core/quivr_core/api/modules/notification/repository/__init__.py create mode 100644 backend/core/quivr_core/api/modules/notification/repository/notifications.py create mode 100644 backend/core/quivr_core/api/modules/notification/repository/notifications_interface.py create mode 100644 backend/core/quivr_core/api/modules/notification/service/__init__.py create mode 100644 backend/core/quivr_core/api/modules/notification/service/notification_service.py create mode 100644 backend/core/quivr_core/api/modules/notification/tests/test_notification.py create mode 100644 backend/core/quivr_core/api/vectorstore/__init__.py create mode 100644 backend/core/quivr_core/api/vectorstore/supabase.py diff --git a/backend/core/quivr_core/api/modules/chat/controller/chat/brainful_chat.py b/backend/core/quivr_core/api/modules/chat/controller/chat/brainful_chat.py deleted file mode 100644 index 6dd65d1cb306..000000000000 --- 
a/backend/core/quivr_core/api/modules/chat/controller/chat/brainful_chat.py +++ /dev/null @@ -1,113 +0,0 @@ -from quivr_core.api.logger import get_logger -from quivr_core.api.modules.brain.entity.brain_entity import BrainType, RoleEnum -from quivr_core.api.modules.brain.integrations.Big.Brain import BigBrain -from quivr_core.api.modules.brain.integrations.GPT4.Brain import GPT4Brain -from quivr_core.api.modules.brain.integrations.Multi_Contract.Brain import ( - MultiContractBrain, -) -from quivr_core.api.modules.brain.integrations.Notion.Brain import NotionBrain -from quivr_core.api.modules.brain.integrations.Proxy.Brain import ProxyBrain -from quivr_core.api.modules.brain.integrations.Self.Brain import SelfBrain -from quivr_core.api.modules.brain.integrations.SQL.Brain import SQLBrain -from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA -from quivr_core.api.modules.brain.service.api_brain_definition_service import ( - ApiBrainDefinitionService, -) -from quivr_core.api.modules.brain.service.brain_authorization_service import ( - validate_brain_authorization, -) -from quivr_core.api.modules.brain.service.brain_service import BrainService -from quivr_core.api.modules.brain.service.integration_brain_service import ( - IntegrationBrainDescriptionService, -) -from quivr_core.api.modules.chat.controller.chat.interface import ChatInterface -from quivr_core.api.modules.chat.service.chat_service import ChatService -from quivr_core.api.modules.dependencies import get_service - -chat_service = get_service(ChatService)() -api_brain_definition_service = ApiBrainDefinitionService() -integration_brain_description_service = IntegrationBrainDescriptionService() - -logger = get_logger(__name__) - -models_supporting_function_calls = [ - "gpt-4", - "gpt-4-1106-preview", - "gpt-4-0613", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0613", - "gpt-4-0125-preview", - "gpt-3.5-turbo", - "gpt-4-turbo", - "gpt-4o", -] - - -integration_list = { - "notion": NotionBrain, - "gpt4": GPT4Brain, - "sql": SQLBrain, - "big": BigBrain, - "doc": KnowledgeBrainQA, - "proxy": ProxyBrain, - "self": SelfBrain, - "multi-contract": MultiContractBrain, -} - -brain_service = BrainService() - - -def validate_authorization(user_id, brain_id): - if brain_id: - validate_brain_authorization( - brain_id=brain_id, - user_id=user_id, - required_roles=[RoleEnum.Viewer, RoleEnum.Editor, RoleEnum.Owner], - ) - - -# TODO: redo this -class BrainfulChat(ChatInterface): - def get_answer_generator( - self, - brain, - chat_id, - chat_service, - model, - temperature, - streaming, - prompt_id, - user_id, - user_email, - ): - if brain and brain.brain_type == BrainType.doc: - return KnowledgeBrainQA( - chat_service=chat_service, - chat_id=chat_id, - brain_id=str(brain.brain_id), - streaming=streaming, - prompt_id=prompt_id, - user_id=user_id, - user_email=user_email, - ) - - if brain.brain_type == BrainType.integration: - integration_brain = integration_brain_description_service.get_integration_description_by_user_brain_id( - brain.brain_id, user_id - ) - - integration_class = integration_list.get( - integration_brain.integration_name.lower() - ) - if integration_class: - return integration_class( - chat_service=chat_service, - chat_id=chat_id, - temperature=temperature, - brain_id=str(brain.brain_id), - streaming=streaming, - prompt_id=prompt_id, - user_id=user_id, - user_email=user_email, - ) diff --git a/backend/core/quivr_core/api/modules/chat/controller/chat/brainless_chat.py 
b/backend/core/quivr_core/api/modules/chat/controller/chat/brainless_chat.py deleted file mode 100644 index 9021c7b99fba..000000000000 --- a/backend/core/quivr_core/api/modules/chat/controller/chat/brainless_chat.py +++ /dev/null @@ -1,27 +0,0 @@ -from llm.qa_headless import HeadlessQA - -from quivr_core.api.modules.chat.controller.chat.interface import ChatInterface - - -class BrainlessChat(ChatInterface): - def validate_authorization(self, user_id, brain_id): - pass - - def get_answer_generator( - self, - chat_id, - model, - max_tokens, - temperature, - streaming, - prompt_id, - user_id, - ): - return HeadlessQA( - chat_id=chat_id, - model=model, - max_tokens=max_tokens, - temperature=temperature, - streaming=streaming, - prompt_id=prompt_id, - ) diff --git a/backend/core/quivr_core/api/modules/chat/controller/chat/factory.py b/backend/core/quivr_core/api/modules/chat/controller/chat/factory.py deleted file mode 100644 index 792328feeffc..000000000000 --- a/backend/core/quivr_core/api/modules/chat/controller/chat/factory.py +++ /dev/null @@ -1,11 +0,0 @@ -from uuid import UUID - -from .brainful_chat import BrainfulChat -from .brainless_chat import BrainlessChat - - -def get_chat_strategy(brain_id: UUID | None = None): - if brain_id: - return BrainfulChat() - else: - return BrainlessChat() diff --git a/backend/core/quivr_core/api/modules/chat/controller/chat/interface.py b/backend/core/quivr_core/api/modules/chat/controller/chat/interface.py deleted file mode 100644 index a0bbc95c89c5..000000000000 --- a/backend/core/quivr_core/api/modules/chat/controller/chat/interface.py +++ /dev/null @@ -1,17 +0,0 @@ -from abc import ABC, abstractmethod - - -class ChatInterface(ABC): - @abstractmethod - def get_answer_generator( - self, - chat_id, - model, - max_tokens, - temperature, - streaming, - prompt_id, - user_id, - chat_question, - ): - pass diff --git a/backend/core/quivr_core/api/modules/chat/controller/chat/utils.py b/backend/core/quivr_core/api/modules/chat/controller/chat/utils.py deleted file mode 100644 index e0c7b6df4dcc..000000000000 --- a/backend/core/quivr_core/api/modules/chat/controller/chat/utils.py +++ /dev/null @@ -1,110 +0,0 @@ -import time -from uuid import UUID - -from fastapi import HTTPException - -from quivr_core.api.logger import get_logger -from quivr_core.api.models.databases.llm_models import LLMModel -from quivr_core.api.modules.user.service.user_usage import UserUsage - -logger = get_logger(__name__) - - -class NullableUUID(UUID): - @classmethod - def __get_validators__(cls): - yield cls.validate - - @classmethod - def validate(v, values, **kwargs): - logger.info(f"Validating UUID: {v}") - if v == "": - return None - try: - return UUID(v) - except ValueError: - return None - - -# TODO: rewrite -def compute_cost(model_to_use, models_settings): - model = model_to_use.name - user_choosen_model_price = 1000 - for model_setting in models_settings: - if model_setting["name"] == model: - user_choosen_model_price = model_setting["price"] - return user_choosen_model_price - - -# TODO: rewrite -def find_model_and_generate_metadata( - brain_model: str | None, - user_settings, - models_settings, -): - # Default model is gpt-3.5-turbo-0125 - default_model = "gpt-3.5-turbo-0125" - model_to_use = LLMModel( # TODO Implement default models in database - name=default_model, price=1, max_input=4000, max_output=1000 - ) - - logger.debug("Brain model: %s", brain_model) - - # If brain.model is None, set it to the default_model - if brain_model is None: - brain_model = default_model - - 
is_brain_model_available = any( - brain_model == model_dict.get("name") for model_dict in models_settings - ) - - is_user_allowed_model = brain_model in user_settings.get( - "models", [default_model] - ) # Checks if the model is available in the list of models - - logger.debug(f"Brain model: {brain_model}") - logger.debug(f"User models: {user_settings.get('models', [])}") - logger.debug(f"Model available: {is_brain_model_available}") - logger.debug(f"User allowed model: {is_user_allowed_model}") - - if is_brain_model_available and is_user_allowed_model: - # Use the model from the brain - model_to_use.name = brain_model - for model_dict in models_settings: - if model_dict.get("name") == model_to_use.name: - model_to_use.price = model_dict.get("price") - model_to_use.max_input = model_dict.get("max_input") - model_to_use.max_output = model_dict.get("max_output") - break - - logger.info(f"Model to use: {model_to_use}") - - return model_to_use - - -def update_user_usage(usage: UserUsage, user_settings, cost: int = 100): - """Checks the user requests limit. - It checks the user requests limit and raises an exception if the user has reached the limit. - By default, the user has a limit of 100 requests per month. The limit can be increased by upgrading the plan. - - Args: - user (UserIdentity): User object - model (str): Model name for which the user is making the request - - Raises: - HTTPException: Raises a 429 error if the user has reached the limit. - """ - - date = time.strftime("%Y%m%d") - - monthly_chat_credit = user_settings.get("monthly_chat_credit", 100) - montly_usage = usage.get_user_monthly_usage(date) - - if int(montly_usage + cost) > int(monthly_chat_credit): - raise HTTPException( - status_code=429, # pyright: ignore reportPrivateUsage=none - detail=f"You have reached your monthly chat limit of {monthly_chat_credit} requests per months. 
Please upgrade your plan to increase your monthly chat limit.", - ) - else: - usage.handle_increment_user_request_count(date, cost) - pass diff --git a/backend/core/quivr_core/api/modules/chat/controller/chat_routes.py b/backend/core/quivr_core/api/modules/chat/controller/chat_routes.py index 3dafe6ace34b..a3e478c481d0 100644 --- a/backend/core/quivr_core/api/modules/chat/controller/chat_routes.py +++ b/backend/core/quivr_core/api/modules/chat/controller/chat_routes.py @@ -5,13 +5,7 @@ from fastapi.responses import StreamingResponse from quivr_core.api.logger import get_logger -from quivr_core.api.middlewares.auth import AuthBearer, get_current_user -from quivr_core.api.models.settings import get_embedding_client, get_supabase_client from quivr_core.api.modules.brain.service.brain_service import BrainService -from quivr_core.api.modules.chat.controller.chat.brainful_chat import ( - BrainfulChat, - validate_authorization, -) from quivr_core.api.modules.chat.dto.chats import ChatItem, ChatQuestion from quivr_core.api.modules.chat.dto.inputs import ( ChatMessageProperties, @@ -21,17 +15,15 @@ ) from quivr_core.api.modules.chat.entity.chat import Chat from quivr_core.api.modules.chat.service.chat_service import ChatService -from quivr_core.api.modules.dependencies import get_service +from quivr_core.api.modules.dependencies import get_current_user, get_service from quivr_core.api.modules.knowledge.repository.knowledges import KnowledgeRepository from quivr_core.api.modules.prompt.service.prompt_service import PromptService from quivr_core.api.modules.user.entity.user_identity import UserIdentity from quivr_core.api.packages.quivr_core.rag_service import RAGService from quivr_core.api.packages.utils.telemetry import maybe_send_telemetry -from quivr_core.api.vectorstore.supabase import CustomSupabaseVectorStore logger = get_logger(__name__) -chat_router = APIRouter() brain_service = BrainService() knowledge_service = KnowledgeRepository() prompt_service = PromptService() @@ -40,53 +32,7 @@ ChatServiceDep = Annotated[ChatService, Depends(get_service(ChatService))] UserIdentityDep = Annotated[UserIdentity, Depends(get_current_user)] - -def init_vector_store(user_id: UUID) -> CustomSupabaseVectorStore: - """ - Initialize the vector store - """ - supabase_client = get_supabase_client() - embedding_service = get_embedding_client() - vector_store = CustomSupabaseVectorStore( - supabase_client, embedding_service, table_name="vectors", user_id=user_id - ) - - return vector_store - - -async def get_answer_generator( - chat_id: UUID, - chat_question: ChatQuestion, - chat_service: ChatService, - brain_id: UUID | None, - current_user: UserIdentity, -): - chat_instance = BrainfulChat() - vector_store = init_vector_store(user_id=current_user.id) - - # Get History only if needed - if not brain_id: - history = await chat_service.get_chat_history(chat_id) - else: - history = [] - - # TODO(@aminediro) : NOT USED anymore - brain, metadata_brain = brain_service.find_brain_from_question( - brain_id, chat_question.question, current_user, chat_id, history, vector_store - ) - gpt_answer_generator = chat_instance.get_answer_generator( - brain=brain, - chat_id=str(chat_id), - chat_service=chat_service, - model=brain.model, - temperature=0.1, - streaming=True, - prompt_id=chat_question.prompt_id, - user_id=current_user.id, - user_email=current_user.email, - ) - - return gpt_answer_generator +chat_router = APIRouter() @chat_router.get("/chat/healthz", tags=["Health"]) @@ -95,7 +41,7 @@ async def healthz(): # get all chats 
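# NOTE: in the hunks below the `dependencies=[Depends(AuthBearer())]` guards are
# removed; authentication is presumably performed inside `get_current_user` (now
# imported from quivr_core.api.modules.dependencies) and injected into each route
# through the `UserIdentityDep` annotation declared above.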
-@chat_router.get("/chat", dependencies=[Depends(AuthBearer())], tags=["Chat"]) +@chat_router.get("/chat", tags=["Chat"]) async def get_chats(current_user: UserIdentityDep, chat_service: ChatServiceDep): """ Retrieve all chats for the current user. @@ -111,9 +57,7 @@ async def get_chats(current_user: UserIdentityDep, chat_service: ChatServiceDep) # delete one chat -@chat_router.delete( - "/chat/{chat_id}", dependencies=[Depends(AuthBearer())], tags=["Chat"] -) +@chat_router.delete("/chat/{chat_id}", tags=["Chat"]) async def delete_chat(chat_id: UUID, chat_service: ChatServiceDep): """ Delete a specific chat by chat ID. @@ -124,9 +68,7 @@ async def delete_chat(chat_id: UUID, chat_service: ChatServiceDep): # update existing chat metadata -@chat_router.put( - "/chat/{chat_id}/metadata", dependencies=[Depends(AuthBearer())], tags=["Chat"] -) +@chat_router.put("/chat/{chat_id}/metadata", tags=["Chat"]) async def update_chat_metadata_handler( chat_data: ChatUpdatableProperties, chat_id: UUID, @@ -155,23 +97,20 @@ async def update_chat_message( current_user: UserIdentityDep, chat_service: ChatServiceDep, ): - chat = await chat_service.get_chat_by_id( - chat_id # pyright: ignore reportPrivateUsage=none - ) + chat = await chat_service.get_chat_by_id(chat_id) if str(current_user.id) != chat.user_id: raise HTTPException( - status_code=403, # pyright: ignore reportPrivateUsage=none - detail="You should be the owner of the chat to update it.", # pyright: ignore reportPrivateUsage=none + status_code=403, detail="You should be the owner of the chat to update it." ) return chat_service.update_chat_message( chat_id=chat_id, message_id=message_id, - chat_message_properties=chat_message_properties.dict(), + chat_message_properties=chat_message_properties, ) # create new chat -@chat_router.post("/chat", dependencies=[Depends(AuthBearer())], tags=["Chat"]) +@chat_router.post("/chat", tags=["Chat"]) async def create_chat_handler( chat_data: CreateChatProperties, current_user: UserIdentityDep, @@ -189,11 +128,6 @@ async def create_chat_handler( # add new question to chat @chat_router.post( "/chat/{chat_id}/question", - dependencies=[ - Depends( - AuthBearer(), - ), - ], tags=["Chat"], ) async def create_question_handler( @@ -204,8 +138,6 @@ async def create_question_handler( chat_service: ChatServiceDep, brain_id: Annotated[UUID | None, Query()] = None, ): - # TODO: check logic into middleware - validate_authorization(user_id=current_user.id, brain_id=brain_id) try: rag_service = RAGService( current_user, @@ -233,11 +165,6 @@ async def create_question_handler( # stream new question response from chat @chat_router.post( "/chat/{chat_id}/question/stream", - dependencies=[ - Depends( - AuthBearer(), - ), - ], tags=["Chat"], ) async def create_stream_question_handler( @@ -248,7 +175,6 @@ async def create_stream_question_handler( current_user: UserIdentityDep, brain_id: Annotated[UUID | None, Query()] = None, ) -> StreamingResponse: - validate_authorization(user_id=current_user.id, brain_id=brain_id) logger.info( f"Creating question for chat {chat_id} with brain {brain_id} of type {type(brain_id)}" @@ -272,9 +198,7 @@ async def create_stream_question_handler( # get chat history -@chat_router.get( - "/chat/{chat_id}/history", dependencies=[Depends(AuthBearer())], tags=["Chat"] -) +@chat_router.get("/chat/{chat_id}/history", tags=["Chat"]) async def get_chat_history_handler( chat_id: UUID, chat_service: ChatServiceDep, @@ -284,7 +208,6 @@ async def get_chat_history_handler( @chat_router.post( 
"/chat/{chat_id}/question/answer", - dependencies=[Depends(AuthBearer())], tags=["Chat"], ) async def add_question_and_answer_handler( diff --git a/backend/core/quivr_core/api/modules/chat/controller/chat/__init_.py b/backend/core/quivr_core/api/modules/notification/__init__.py similarity index 100% rename from backend/core/quivr_core/api/modules/chat/controller/chat/__init_.py rename to backend/core/quivr_core/api/modules/notification/__init__.py diff --git a/backend/core/quivr_core/api/modules/chat/controller/chat/test_utils.py b/backend/core/quivr_core/api/modules/notification/controller/__init__.py similarity index 100% rename from backend/core/quivr_core/api/modules/chat/controller/chat/test_utils.py rename to backend/core/quivr_core/api/modules/notification/controller/__init__.py diff --git a/backend/core/quivr_core/api/modules/notification/dto/__init__.py b/backend/core/quivr_core/api/modules/notification/dto/__init__.py new file mode 100644 index 000000000000..726ac989c066 --- /dev/null +++ b/backend/core/quivr_core/api/modules/notification/dto/__init__.py @@ -0,0 +1 @@ +from .inputs import NotificationUpdatableProperties \ No newline at end of file diff --git a/backend/core/quivr_core/api/modules/notification/dto/inputs.py b/backend/core/quivr_core/api/modules/notification/dto/inputs.py new file mode 100644 index 000000000000..7aed0fa3c161 --- /dev/null +++ b/backend/core/quivr_core/api/modules/notification/dto/inputs.py @@ -0,0 +1,26 @@ +from typing import Optional +from uuid import UUID + +from pydantic import BaseModel +from quivr_api.modules.notification.entity.notification import NotificationsStatusEnum + + +class CreateNotification(BaseModel): + """Properties that can be received on notification creation""" + + user_id: UUID + status: NotificationsStatusEnum + title: str + description: Optional[str] = None + + def model_dump(self, *args, **kwargs): + notification_dict = super().model_dump(*args, **kwargs) + notification_dict["user_id"] = str(notification_dict["user_id"]) + return notification_dict + + +class NotificationUpdatableProperties(BaseModel): + """Properties that can be received on notification update""" + + status: Optional[NotificationsStatusEnum] + description: Optional[str] diff --git a/backend/core/quivr_core/api/modules/notification/dto/outputs.py b/backend/core/quivr_core/api/modules/notification/dto/outputs.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/notification/entity/__init__.py b/backend/core/quivr_core/api/modules/notification/entity/__init__.py new file mode 100644 index 000000000000..209eacf8425f --- /dev/null +++ b/backend/core/quivr_core/api/modules/notification/entity/__init__.py @@ -0,0 +1 @@ +from .notification import Notification diff --git a/backend/core/quivr_core/api/modules/notification/entity/notification.py b/backend/core/quivr_core/api/modules/notification/entity/notification.py new file mode 100644 index 000000000000..4a1378b8d65f --- /dev/null +++ b/backend/core/quivr_core/api/modules/notification/entity/notification.py @@ -0,0 +1,24 @@ +from datetime import datetime +from enum import Enum +from typing import Optional +from uuid import UUID + +from pydantic import BaseModel + + +class NotificationsStatusEnum(str, Enum): + INFO = "info" + SUCCESS = "success" + WARNING = "warning" + ERROR = "error" + + +class Notification(BaseModel): + id: UUID + user_id: UUID + status: NotificationsStatusEnum + title: str + description: Optional[str] + archived: Optional[bool] = False + read: 
Optional[bool] = False
+    datetime: Optional[datetime]  # timestamp
diff --git a/backend/core/quivr_core/api/modules/notification/repository/__init__.py b/backend/core/quivr_core/api/modules/notification/repository/__init__.py
new file mode 100644
index 000000000000..2d49d9356a1e
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/notification/repository/__init__.py
@@ -0,0 +1 @@
+from .notifications import Notifications
diff --git a/backend/core/quivr_core/api/modules/notification/repository/notifications.py b/backend/core/quivr_core/api/modules/notification/repository/notifications.py
new file mode 100644
index 000000000000..fc0af6073e52
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/notification/repository/notifications.py
@@ -0,0 +1,68 @@
+from quivr_api.logger import get_logger
+from quivr_api.modules.notification.dto.inputs import CreateNotification
+from quivr_api.modules.notification.entity.notification import Notification
+from quivr_api.modules.notification.repository.notifications_interface import (
+    NotificationInterface,
+)
+
+logger = get_logger(__name__)
+
+
+class Notifications(NotificationInterface):
+    def __init__(self, supabase_client):
+        self.db = supabase_client
+
+    def add_notification(self, notification: CreateNotification):
+        """
+        Add a notification
+        """
+        response = (
+            self.db.from_("notifications").insert(notification.model_dump()).execute()
+        ).data
+        return Notification(**response[0])
+
+    def update_notification_by_id(
+        self,
+        notification_id,
+        notification,
+    ):
+        """Update a notification by id"""
+        if notification_id is None:
+            logger.info("Notification id is required")
+            return None
+
+        response = (
+            self.db.from_("notifications")
+            .update(notification.model_dump(exclude_unset=True))
+            .filter("id", "eq", notification_id)
+            .execute()
+        ).data
+
+        if response == []:
+            logger.info(f"Notification with id {notification_id} not found")
+            return None
+
+        return Notification(**response[0])
+
+    def remove_notification_by_id(self, notification_id):
+        """
+        Remove a notification by id
+        Args:
+            notification_id (UUID): The id of the notification
+
+        Returns:
+            str: Status message
+        """
+        response = (
+            self.db.from_("notifications")
+            .delete()
+            .filter("id", "eq", notification_id)
+            .execute()
+            .data
+        )
+
+        if response == []:
+            logger.info(f"Notification with id {notification_id} not found")
+            return None
+
+        return {"status": "success"}
diff --git a/backend/core/quivr_core/api/modules/notification/repository/notifications_interface.py b/backend/core/quivr_core/api/modules/notification/repository/notifications_interface.py
new file mode 100644
index 000000000000..e80740a825b3
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/notification/repository/notifications_interface.py
@@ -0,0 +1,36 @@
+from abc import ABC, abstractmethod
+from uuid import UUID
+
+from quivr_api.modules.notification.dto.inputs import (
+    CreateNotification,
+    NotificationUpdatableProperties,
+)
+from quivr_api.modules.notification.entity.notification import Notification
+
+
+class NotificationInterface(ABC):
+    @abstractmethod
+    def add_notification(self, notification: CreateNotification) -> Notification:
+        """
+        Add a notification
+        """
+        pass
+
+    @abstractmethod
+    def update_notification_by_id(
+        self, notification_id: UUID, notification: NotificationUpdatableProperties
+    ) -> Notification:
+        """Update a notification by id"""
+        pass
+
+    @abstractmethod
+    def remove_notification_by_id(self, notification_id: UUID):
+        """
+        Remove a notification by id
+        Args:
notification_id (UUID): The id of the notification + + Returns: + str: Status message + """ + pass diff --git a/backend/core/quivr_core/api/modules/notification/service/__init__.py b/backend/core/quivr_core/api/modules/notification/service/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/notification/service/notification_service.py b/backend/core/quivr_core/api/modules/notification/service/notification_service.py new file mode 100644 index 000000000000..295bcd9c91da --- /dev/null +++ b/backend/core/quivr_core/api/modules/notification/service/notification_service.py @@ -0,0 +1,34 @@ +from quivr_api.models.settings import get_supabase_client +from quivr_api.modules.notification.dto.inputs import ( + CreateNotification, + NotificationUpdatableProperties, +) +from quivr_api.modules.notification.repository.notifications import Notifications +from quivr_api.modules.notification.repository.notifications_interface import ( + NotificationInterface, +) + + +class NotificationService: + repository: NotificationInterface + + def __init__(self): + supabase_client = get_supabase_client() + self.repository = Notifications(supabase_client) + + def add_notification(self, notification: CreateNotification): + """ + Add a notification + """ + return self.repository.add_notification(notification) + + def update_notification_by_id( + self, notification_id, notification: NotificationUpdatableProperties + ): + """ + Update a notification + """ + if notification: + return self.repository.update_notification_by_id( + notification_id, notification + ) diff --git a/backend/core/quivr_core/api/modules/notification/tests/test_notification.py b/backend/core/quivr_core/api/modules/notification/tests/test_notification.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/vectorstore/__init__.py b/backend/core/quivr_core/api/vectorstore/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/vectorstore/supabase.py b/backend/core/quivr_core/api/vectorstore/supabase.py new file mode 100644 index 000000000000..9e017c8955f1 --- /dev/null +++ b/backend/core/quivr_core/api/vectorstore/supabase.py @@ -0,0 +1,104 @@ +from typing import Any, List +from uuid import UUID + +from langchain.docstore.document import Document +from langchain.embeddings.base import Embeddings +from langchain_community.vectorstores import SupabaseVectorStore +from quivr_api.logger import get_logger +from supabase.client import Client + +logger = get_logger(__name__) + + +class CustomSupabaseVectorStore(SupabaseVectorStore): + """A custom vector store that uses the match_vectors table instead of the vectors table.""" + + def __init__( + self, + client: Client, + embedding: Embeddings, + table_name: str, + brain_id: UUID | None = None, + user_id: UUID | None = None, + number_docs: int = 35, + max_input: int = 2000, + ): + super().__init__(client, embedding, table_name) + self.brain_id = brain_id + self.user_id = user_id + self.number_docs = number_docs + self.max_input = max_input + + def find_brain_closest_query( + self, + user_id: str, + query: str, + k: int = 6, + table: str = "match_brain", + threshold: float = 0.5, + ) -> list[dict[str, Any]]: + vectors = self._embedding.embed_documents([query]) + query_embedding = vectors[0] + + res = self._client.rpc( + table, + { + "query_embedding": query_embedding, + "match_count": self.number_docs, + "p_user_id": str(self.user_id), + }, + ).execute() + + 
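        # NOTE: `match_brain` is assumed to be a Postgres function shipped with the
        # Supabase migrations: it takes the query embedding, scopes candidates to
        # `p_user_id`, caps them at `match_count`, and returns rows exposing `id`,
        # `name` and `similarity`, which are unpacked below.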
# Get the brain_id of the brain that is most similar to the query + # Get the brain_id and name of the brains that are most similar to the query + brain_details = [ + { + "id": item.get("id", None), + "name": item.get("name", None), + "similarity": item.get("similarity", 0.0), + } + for item in res.data + ] + return brain_details + + def similarity_search( + self, + query: str, + k: int = 40, + table: str = "match_vectors", + threshold: float = 0.5, + **kwargs: Any, + ) -> List[Document]: + vectors = self._embedding.embed_documents([query]) + query_embedding = vectors[0] + res = self._client.rpc( + table, + { + "query_embedding": query_embedding, + "max_chunk_sum": self.max_input, + "p_brain_id": str(self.brain_id), + }, + ).execute() + + match_result = [ + Document( + metadata={ + **search.get("metadata", {}), + "id": search.get("id", ""), + "similarity": search.get("similarity", 0.0), + }, + page_content=search.get("content", ""), + ) + for search in res.data + if search.get("content") + ] + + sorted_match_result_by_file_name_metadata = sorted( + match_result, + key=lambda x: ( + x.metadata.get("file_name", ""), + x.metadata.get("index", float("inf")), + ), + ) + + return sorted_match_result_by_file_name_metadata From 60e3699d3719ca89984ac9ff152a95aa107d5f50 Mon Sep 17 00:00:00 2001 From: aminediro Date: Fri, 28 Jun 2024 11:04:31 +0200 Subject: [PATCH 03/20] core chat --- .../core/quivr_core/api/modules/chat/controller/chat_routes.py | 1 - .../core/quivr_core/api/modules/notification/dto/__init__.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/backend/core/quivr_core/api/modules/chat/controller/chat_routes.py b/backend/core/quivr_core/api/modules/chat/controller/chat_routes.py index a3e478c481d0..7c57d9da6514 100644 --- a/backend/core/quivr_core/api/modules/chat/controller/chat_routes.py +++ b/backend/core/quivr_core/api/modules/chat/controller/chat_routes.py @@ -175,7 +175,6 @@ async def create_stream_question_handler( current_user: UserIdentityDep, brain_id: Annotated[UUID | None, Query()] = None, ) -> StreamingResponse: - logger.info( f"Creating question for chat {chat_id} with brain {brain_id} of type {type(brain_id)}" ) diff --git a/backend/core/quivr_core/api/modules/notification/dto/__init__.py b/backend/core/quivr_core/api/modules/notification/dto/__init__.py index 726ac989c066..2d81927d4b12 100644 --- a/backend/core/quivr_core/api/modules/notification/dto/__init__.py +++ b/backend/core/quivr_core/api/modules/notification/dto/__init__.py @@ -1 +1 @@ -from .inputs import NotificationUpdatableProperties \ No newline at end of file +from .inputs import NotificationUpdatableProperties From a326082abcc859449694ae7dcf6233cafdea02ad Mon Sep 17 00:00:00 2001 From: aminediro Date: Fri, 28 Jun 2024 12:30:10 +0200 Subject: [PATCH 04/20] core prompt --- .../prompt/controller/prompt_routes.py | 15 +++++-------- .../api/modules/prompt/repository/prompts.py | 21 ++++++++++++------- .../prompt/service/get_prompt_to_use.py | 17 --------------- .../modules/prompt/service/prompt_service.py | 15 +++++++++++-- 4 files changed, 31 insertions(+), 37 deletions(-) delete mode 100644 backend/core/quivr_core/api/modules/prompt/service/get_prompt_to_use.py diff --git a/backend/core/quivr_core/api/modules/prompt/controller/prompt_routes.py b/backend/core/quivr_core/api/modules/prompt/controller/prompt_routes.py index e51c5b540927..0415be6a0e3e 100644 --- a/backend/core/quivr_core/api/modules/prompt/controller/prompt_routes.py +++ 
b/backend/core/quivr_core/api/modules/prompt/controller/prompt_routes.py @@ -1,8 +1,7 @@ from uuid import UUID -from fastapi import APIRouter, Depends +from fastapi import APIRouter -from quivr_core.api.middlewares.auth import AuthBearer from quivr_core.api.modules.prompt.entity.prompt import ( CreatePromptProperties, Prompt, @@ -15,7 +14,7 @@ promptService = PromptService() -@prompt_router.get("/prompts", dependencies=[Depends(AuthBearer())], tags=["Prompt"]) +@prompt_router.get("/prompts", tags=["Prompt"]) async def get_prompts() -> list[Prompt]: """ Retrieve all public prompt @@ -23,9 +22,7 @@ async def get_prompts() -> list[Prompt]: return promptService.get_public_prompts() -@prompt_router.get( - "/prompts/{prompt_id}", dependencies=[Depends(AuthBearer())], tags=["Prompt"] -) +@prompt_router.get("/prompts/{prompt_id}", tags=["Prompt"]) async def get_prompt(prompt_id: UUID) -> Prompt | None: """ Retrieve a prompt by its id @@ -34,9 +31,7 @@ async def get_prompt(prompt_id: UUID) -> Prompt | None: return promptService.get_prompt_by_id(prompt_id) -@prompt_router.put( - "/prompts/{prompt_id}", dependencies=[Depends(AuthBearer())], tags=["Prompt"] -) +@prompt_router.put("/prompts/{prompt_id}", tags=["Prompt"]) async def update_prompt( prompt_id: UUID, prompt: PromptUpdatableProperties ) -> Prompt | None: @@ -47,7 +42,7 @@ async def update_prompt( return promptService.update_prompt_by_id(prompt_id, prompt) -@prompt_router.post("/prompts", dependencies=[Depends(AuthBearer())], tags=["Prompt"]) +@prompt_router.post("/prompts", tags=["Prompt"]) async def create_prompt_route(prompt: CreatePromptProperties) -> Prompt | None: """ Create a prompt by its id diff --git a/backend/core/quivr_core/api/modules/prompt/repository/prompts.py b/backend/core/quivr_core/api/modules/prompt/repository/prompts.py index 6cf416b170cb..c04d94aa10f5 100644 --- a/backend/core/quivr_core/api/modules/prompt/repository/prompts.py +++ b/backend/core/quivr_core/api/modules/prompt/repository/prompts.py @@ -1,3 +1,5 @@ +from uuid import UUID + from fastapi import HTTPException from quivr_core.api.models.settings import get_supabase_client @@ -22,7 +24,7 @@ def create_prompt(self, prompt): return Prompt(**response[0]) - def delete_prompt_by_id(self, prompt_id): + def delete_prompt_by_id(self, prompt_id: UUID): """ Delete a prompt by id Args: @@ -34,19 +36,19 @@ def delete_prompt_by_id(self, prompt_id): # Update brains where prompt_id is equal to the value to NULL self.db.from_("brains").update({"prompt_id": None}).filter( - "prompt_id", "eq", prompt_id + "prompt_id", "eq", str(prompt_id) ).execute() # Update chat_history where prompt_id is equal to the value to NULL self.db.from_("chat_history").update({"prompt_id": None}).filter( - "prompt_id", "eq", prompt_id + "prompt_id", "eq", str(prompt_id) ).execute() # Delete the prompt response = ( self.db.from_("prompts") .delete() - .filter("id", "eq", prompt_id) + .filter("id", "eq", str(prompt_id)) .execute() .data ) @@ -56,7 +58,7 @@ def delete_prompt_by_id(self, prompt_id): return DeletePromptResponse(status="deleted", prompt_id=prompt_id) - def get_prompt_by_id(self, prompt_id): + def get_prompt_by_id(self, prompt_id: UUID): """ Get a prompt by its id @@ -68,7 +70,10 @@ def get_prompt_by_id(self, prompt_id): """ response = ( - self.db.from_("prompts").select("*").filter("id", "eq", prompt_id).execute() + self.db.from_("prompts") + .select("*") + .filter("id", "eq", str(prompt_id)) + .execute() ).data if response == []: @@ -87,13 +92,13 @@ def get_public_prompts(self): 
.execute() ).data - def update_prompt_by_id(self, prompt_id, prompt): + def update_prompt_by_id(self, prompt_id: UUID, prompt): """Update a prompt by id""" response = ( self.db.from_("prompts") .update(prompt.dict(exclude_unset=True)) - .filter("id", "eq", prompt_id) + .filter("id", "eq", str(prompt_id)) .execute() ).data diff --git a/backend/core/quivr_core/api/modules/prompt/service/get_prompt_to_use.py b/backend/core/quivr_core/api/modules/prompt/service/get_prompt_to_use.py deleted file mode 100644 index f96594d41452..000000000000 --- a/backend/core/quivr_core/api/modules/prompt/service/get_prompt_to_use.py +++ /dev/null @@ -1,17 +0,0 @@ -from typing import Optional -from uuid import UUID - -from quivr_core.api.modules.brain.service.utils.get_prompt_to_use_id import ( - get_prompt_to_use_id, -) -from quivr_core.api.modules.prompt.service import PromptService - -promptService = PromptService() - - -def get_prompt_to_use(brain_id: Optional[UUID], prompt_id: Optional[UUID]) -> str: - prompt_to_use_id = get_prompt_to_use_id(brain_id, prompt_id) - if prompt_to_use_id is None: - return None - - return promptService.get_prompt_by_id(prompt_to_use_id) diff --git a/backend/core/quivr_core/api/modules/prompt/service/prompt_service.py b/backend/core/quivr_core/api/modules/prompt/service/prompt_service.py index f1659303f14c..24ea0b3ee08a 100644 --- a/backend/core/quivr_core/api/modules/prompt/service/prompt_service.py +++ b/backend/core/quivr_core/api/modules/prompt/service/prompt_service.py @@ -1,7 +1,9 @@ from typing import List from uuid import UUID -from quivr_core.api.models.settings import get_supabase_client +from quivr_core.api.modules.brain.service.utils.get_prompt_to_use_id import ( + get_prompt_to_use_id, +) from quivr_core.api.modules.prompt.entity.prompt import ( CreatePromptProperties, DeletePromptResponse, @@ -15,7 +17,6 @@ class PromptService: repository: Prompts def __init__(self): - supabase_client = get_supabase_client() self.repository = Prompts() def create_prompt(self, prompt: CreatePromptProperties) -> Prompt: @@ -57,3 +58,13 @@ def update_prompt_by_id( """Update a prompt by id""" return self.repository.update_prompt_by_id(prompt_id, prompt) + + def get_prompt_to_use( + self, brain_id: UUID | None, prompt_id: UUID | None + ) -> Prompt | None: + prompt_to_use_id = get_prompt_to_use_id(brain_id, prompt_id) + + if prompt_to_use_id is None: + return None + + return self.get_prompt_by_id(prompt_to_use_id) From 251e26cfb212bb505335d62ea2120ccdfc413fe4 Mon Sep 17 00:00:00 2001 From: aminediro Date: Fri, 28 Jun 2024 12:35:05 +0200 Subject: [PATCH 05/20] core upload --- .../upload/controller/upload_routes.py | 27 ++----------------- .../api/modules/upload/service/upload_file.py | 1 - 2 files changed, 2 insertions(+), 26 deletions(-) diff --git a/backend/core/quivr_core/api/modules/upload/controller/upload_routes.py b/backend/core/quivr_core/api/modules/upload/controller/upload_routes.py index a8da251b4daa..605a22935f62 100644 --- a/backend/core/quivr_core/api/modules/upload/controller/upload_routes.py +++ b/backend/core/quivr_core/api/modules/upload/controller/upload_routes.py @@ -6,11 +6,7 @@ from quivr_core.api.celery_worker import process_file_and_notify from quivr_core.api.logger import get_logger -from quivr_core.api.middlewares.auth import AuthBearer, get_current_user -from quivr_core.api.modules.brain.entity.brain_entity import RoleEnum -from quivr_core.api.modules.brain.service.brain_authorization_service import ( - validate_brain_authorization, -) +from 
quivr_core.api.modules.dependencies import get_current_user from quivr_core.api.modules.knowledge.dto.inputs import CreateKnowledgeProperties from quivr_core.api.modules.knowledge.service.knowledge_service import KnowledgeService from quivr_core.api.modules.notification.dto.inputs import ( @@ -25,8 +21,6 @@ ) from quivr_core.api.modules.upload.service.upload_file import upload_file_storage from quivr_core.api.modules.user.entity.user_identity import UserIdentity -from quivr_core.api.modules.user.service.user_usage import UserUsage -from quivr_core.api.packages.files.file import convert_bytes, get_file_size from quivr_core.api.packages.utils.telemetry import maybe_send_telemetry logger = get_logger(__name__) @@ -41,22 +35,13 @@ async def healthz(): return {"status": "ok"} -@upload_router.post("/upload", dependencies=[Depends(AuthBearer())], tags=["Upload"]) +@upload_router.post("/upload", tags=["Upload"]) async def upload_file( uploadFile: UploadFile, brain_id: UUID = Query(..., description="The ID of the brain"), chat_id: Optional[UUID] = Query(None, description="The ID of the chat"), current_user: UserIdentity = Depends(get_current_user), ): - validate_brain_authorization( - brain_id, current_user.id, [RoleEnum.Editor, RoleEnum.Owner] - ) - uploadFile.file.seek(0) - user_daily_usage = UserUsage( - id=current_user.id, - email=current_user.email, - ) - upload_notification = notification_service.add_notification( CreateNotification( user_id=current_user.id, @@ -65,17 +50,9 @@ async def upload_file( ) ) - user_settings = user_daily_usage.get_user_settings() - - remaining_free_space = user_settings.get("max_brain_size", 1000000000) maybe_send_telemetry("upload_file", {"file_name": uploadFile.filename}) - file_size = get_file_size(uploadFile) - if remaining_free_space - file_size < 0: - message = f"Brain will exceed maximum capacity. 
Maximum file allowed is : {convert_bytes(remaining_free_space)}" - raise HTTPException(status_code=403, detail=message) file_content = await uploadFile.read() - filename_with_brain_id = str(brain_id) + "/" + str(uploadFile.filename) try: diff --git a/backend/core/quivr_core/api/modules/upload/service/upload_file.py b/backend/core/quivr_core/api/modules/upload/service/upload_file.py index 21db1698078f..d3733fab8a06 100644 --- a/backend/core/quivr_core/api/modules/upload/service/upload_file.py +++ b/backend/core/quivr_core/api/modules/upload/service/upload_file.py @@ -1,6 +1,5 @@ import json import os -from multiprocessing import get_logger from langchain.pydantic_v1 import Field from langchain.schema import Document From 0b29c936bbfcf866b0d478540474ad673289be25 Mon Sep 17 00:00:00 2001 From: aminediro Date: Fri, 28 Jun 2024 12:52:33 +0200 Subject: [PATCH 06/20] removed integration brain --- .../modules/brain/integrations/Big/Brain.py | 146 ------ .../brain/integrations/Big/__init__.py | 0 .../brain/integrations/Claude/Brain.py | 101 ---- .../brain/integrations/Claude/__init__.py | 0 .../modules/brain/integrations/GPT4/Brain.py | 283 ---------- .../brain/integrations/GPT4/__init__.py | 0 .../integrations/Multi_Contract/Brain.py | 205 -------- .../integrations/Multi_Contract/__init__.py | 0 .../brain/integrations/Notion/Brain.py | 25 - .../integrations/Notion/Notion_connector.py | 393 -------------- .../brain/integrations/Notion/__init__.py | 0 .../modules/brain/integrations/Proxy/Brain.py | 135 ----- .../brain/integrations/Proxy/__init__.py | 0 .../modules/brain/integrations/SQL/Brain.py | 104 ---- .../brain/integrations/SQL/SQL_connector.py | 41 -- .../brain/integrations/SQL/__init__.py | 0 .../modules/brain/integrations/Self/Brain.py | 487 ------------------ .../brain/integrations/Self/__init__.py | 0 .../modules/brain/integrations/__init__.py | 0 19 files changed, 1920 deletions(-) delete mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Big/Brain.py delete mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Big/__init__.py delete mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Claude/Brain.py delete mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Claude/__init__.py delete mode 100644 backend/core/quivr_core/api/modules/brain/integrations/GPT4/Brain.py delete mode 100644 backend/core/quivr_core/api/modules/brain/integrations/GPT4/__init__.py delete mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Multi_Contract/Brain.py delete mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Multi_Contract/__init__.py delete mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Notion/Brain.py delete mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Notion/Notion_connector.py delete mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Notion/__init__.py delete mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Proxy/Brain.py delete mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Proxy/__init__.py delete mode 100644 backend/core/quivr_core/api/modules/brain/integrations/SQL/Brain.py delete mode 100644 backend/core/quivr_core/api/modules/brain/integrations/SQL/SQL_connector.py delete mode 100644 backend/core/quivr_core/api/modules/brain/integrations/SQL/__init__.py delete mode 100644 backend/core/quivr_core/api/modules/brain/integrations/Self/Brain.py delete mode 100644 
backend/core/quivr_core/api/modules/brain/integrations/Self/__init__.py delete mode 100644 backend/core/quivr_core/api/modules/brain/integrations/__init__.py diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Big/Brain.py b/backend/core/quivr_core/api/modules/brain/integrations/Big/Brain.py deleted file mode 100644 index ed09ceac6bd0..000000000000 --- a/backend/core/quivr_core/api/modules/brain/integrations/Big/Brain.py +++ /dev/null @@ -1,146 +0,0 @@ -import json -from typing import AsyncIterable -from uuid import UUID - -from langchain.chains import ConversationalRetrievalChain, LLMChain -from langchain.chains.question_answering import load_qa_chain -from langchain_community.chat_models import ChatLiteLLM -from langchain_core.prompts.chat import ( - ChatPromptTemplate, - HumanMessagePromptTemplate, - SystemMessagePromptTemplate, -) -from langchain_core.prompts.prompt import PromptTemplate - -from quivr_core.api.logger import get_logger -from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA -from quivr_core.api.modules.chat.dto.chats import ChatQuestion - -logger = get_logger(__name__) - - -class BigBrain(KnowledgeBrainQA): - """ - The BigBrain class integrates advanced conversational retrieval and language model chains - to provide comprehensive and context-aware responses to user queries. - - It leverages a combination of document retrieval, question condensation, and document-based - question answering to generate responses that are informed by a wide range of knowledge sources. - """ - - def __init__( - self, - **kwargs, - ): - """ - Initializes the BigBrain class with specific configurations. - - Args: - **kwargs: Arbitrary keyword arguments. - """ - super().__init__( - **kwargs, - ) - - def get_chain(self): - """ - Constructs and returns the conversational QA chain used by BigBrain. - - Returns: - A ConversationalRetrievalChain instance. - """ - system_template = """Combine these summaries in a way that makes sense and answer the user's question. - Use markdown or any other techniques to display the content in a nice and aerated way. Answer in the language of the question. - Here are user instructions on how to respond: {custom_personality} - ______________________ - {summaries}""" - messages = [ - SystemMessagePromptTemplate.from_template(system_template), - HumanMessagePromptTemplate.from_template("{question}"), - ] - CHAT_COMBINE_PROMPT = ChatPromptTemplate.from_messages(messages) - - ### Question prompt - question_prompt_template = """Use the following portion of a long document to see if any of the text is relevant to answer the question. - Return any relevant text verbatim. Return the answer in the same language as the question. If the answer is not in the text, just say nothing in the same language as the question. - {context} - Question: {question} - Relevant text, if any, else say Nothing:""" - QUESTION_PROMPT = PromptTemplate( - template=question_prompt_template, input_variables=["context", "question"] - ) - - ### Condense Question Prompt - - _template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question in exactly the same language as the original question. 
- - Chat History: - {chat_history} - Follow Up Input: {question} - Standalone question in same language as question:""" - CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template) - - api_base = None - if self.brain_settings.ollama_api_base_url and self.model.startswith("ollama"): - api_base = self.brain_settings.ollama_api_base_url - - llm = ChatLiteLLM( - temperature=0, - model=self.model, - api_base=api_base, - max_tokens=self.max_tokens, - ) - - retriever_doc = self.knowledge_qa.get_retriever() - - question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT) - doc_chain = load_qa_chain( - llm, - chain_type="map_reduce", - question_prompt=QUESTION_PROMPT, - combine_prompt=CHAT_COMBINE_PROMPT, - ) - - chain = ConversationalRetrievalChain( - retriever=retriever_doc, - question_generator=question_generator, - combine_docs_chain=doc_chain, - ) - - return chain - - async def generate_stream( - self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True - ) -> AsyncIterable: - """ - Generates a stream of responses for a given question in real-time. - - Args: - chat_id (UUID): The unique identifier for the chat session. - question (ChatQuestion): The question object containing the user's query. - save_answer (bool): Flag indicating whether to save the answer to the chat history. - - Returns: - An asynchronous iterable of response strings. - """ - conversational_qa_chain = self.get_chain() - transformed_history, streamed_chat_history = ( - self.initialize_streamed_chat_history(chat_id, question) - ) - response_tokens = [] - - async for chunk in conversational_qa_chain.astream( - { - "question": question.question, - "chat_history": transformed_history, - "custom_personality": ( - self.prompt_to_use.content if self.prompt_to_use else None - ), - } - ): - if "answer" in chunk: - response_tokens.append(chunk["answer"]) - streamed_chat_history.assistant = chunk["answer"] - yield f"data: {json.dumps(streamed_chat_history.dict())}" - - self.save_answer(question, response_tokens, streamed_chat_history, save_answer) diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Big/__init__.py b/backend/core/quivr_core/api/modules/brain/integrations/Big/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Claude/Brain.py b/backend/core/quivr_core/api/modules/brain/integrations/Claude/Brain.py deleted file mode 100644 index c667e4ae4615..000000000000 --- a/backend/core/quivr_core/api/modules/brain/integrations/Claude/Brain.py +++ /dev/null @@ -1,101 +0,0 @@ -import json -from typing import AsyncIterable -from uuid import UUID - -from langchain_community.chat_models import ChatLiteLLM -from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder - -from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA -from quivr_core.api.modules.chat.dto.chats import ChatQuestion - - -class ClaudeBrain(KnowledgeBrainQA): - """ - ClaudeBrain integrates with Claude model to provide conversational AI capabilities. - It leverages the Claude model for generating responses based on the provided context. - - Attributes: - **kwargs: Arbitrary keyword arguments for KnowledgeBrainQA initialization. - """ - - def __init__( - self, - **kwargs, - ): - """ - Initializes the ClaudeBrain with the given arguments. - - Args: - **kwargs: Arbitrary keyword arguments. 
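Every brain deleted in this patch streams answers with the same server-sent-events framing: each chunk updates the running chat history, which is re-serialized and emitted as one "data:" event. A minimal, self-contained sketch of that framing, with illustrative names standing in for the real chain and history objects:

    import asyncio
    import json

    async def fake_chunks():
        # Stand-in for conversational_qa_chain.astream(...)
        for token in ["Hel", "lo"]:
            yield token

    async def sse_stream(chunks):
        history = {"assistant": ""}
        async for token in chunks:
            history["assistant"] = token
            # One SSE event per chunk, mirroring the f"data: {json.dumps(...)}" yields above
            yield f"data: {json.dumps(history)}"

    async def main():
        async for event in sse_stream(fake_chunks()):
            print(event)

    asyncio.run(main())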
- """ - super().__init__( - **kwargs, - ) - - def calculate_pricing(self): - """ - Calculates the pricing for using the ClaudeBrain. - - Returns: - int: The pricing value. - """ - return 3 - - def get_chain(self): - """ - Constructs and returns the conversational chain for ClaudeBrain. - - Returns: - A conversational chain object. - """ - prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "You are Claude powered by Quivr. You are an assistant. {custom_personality}", - ), - MessagesPlaceholder(variable_name="chat_history"), - ("human", "{question}"), - ] - ) - - chain = prompt | ChatLiteLLM( - model="claude-3-haiku-20240307", max_tokens=self.max_tokens - ) - - return chain - - async def generate_stream( - self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True - ) -> AsyncIterable: - """ - Generates a stream of responses for the given question. - - Args: - chat_id (UUID): The chat session ID. - question (ChatQuestion): The question object. - save_answer (bool): Whether to save the answer. - - Yields: - AsyncIterable: A stream of response strings. - """ - conversational_qa_chain = self.get_chain() - transformed_history, streamed_chat_history = ( - self.initialize_streamed_chat_history(chat_id, question) - ) - response_tokens = [] - - async for chunk in conversational_qa_chain.astream( - { - "question": question.question, - "chat_history": transformed_history, - "custom_personality": ( - self.prompt_to_use.content if self.prompt_to_use else None - ), - } - ): - response_tokens.append(chunk.content) - streamed_chat_history.assistant = chunk.content - yield f"data: {json.dumps(streamed_chat_history.dict())}" - - self.save_answer(question, response_tokens, streamed_chat_history, save_answer) diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Claude/__init__.py b/backend/core/quivr_core/api/modules/brain/integrations/Claude/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/backend/core/quivr_core/api/modules/brain/integrations/GPT4/Brain.py b/backend/core/quivr_core/api/modules/brain/integrations/GPT4/Brain.py deleted file mode 100644 index 100109544a6c..000000000000 --- a/backend/core/quivr_core/api/modules/brain/integrations/GPT4/Brain.py +++ /dev/null @@ -1,283 +0,0 @@ -import json -import operator -from typing import Annotated, AsyncIterable, List, Optional, Sequence, TypedDict -from uuid import UUID - -from langchain.tools import BaseTool -from langchain_core.messages import BaseMessage, ToolMessage -from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder -from langchain_core.tools import BaseTool -from langchain_openai import ChatOpenAI -from langgraph.graph import END, StateGraph -from langgraph.prebuilt import ToolExecutor, ToolInvocation - -from quivr_core.api.logger import get_logger -from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA -from quivr_core.api.modules.chat.dto.chats import ChatQuestion -from quivr_core.api.modules.chat.dto.outputs import GetChatHistoryOutput -from quivr_core.api.modules.chat.service.chat_service import ChatService -from quivr_core.api.modules.dependencies import get_service -from quivr_core.api.modules.tools import ( - EmailSenderTool, - ImageGeneratorTool, - URLReaderTool, - WebSearchTool, -) - - -class AgentState(TypedDict): - messages: Annotated[Sequence[BaseMessage], operator.add] - - -logger = get_logger(__name__) - -chat_service = get_service(ChatService)() - - -class GPT4Brain(KnowledgeBrainQA): - """ - GPT4Brain integrates with 
GPT-4 to provide real-time answers and supports various tools to enhance its capabilities. - - Available Tools: - - WebSearchTool: Performs web searches to find relevant information. - - ImageGeneratorTool: Generates images based on textual descriptions. - - URLReaderTool: Reads and summarizes content from URLs. - - EmailSenderTool: Sends emails with specified content. - - Use Cases: - - WebSearchTool can be used to find the latest news articles on a specific topic or to gather information from various websites. - - ImageGeneratorTool is useful for creating visual content based on textual prompts, such as generating a company logo based on a description. - - URLReaderTool can be used to summarize articles or web pages, making it easier to quickly understand the content without reading the entire text. - - EmailSenderTool enables automated email sending, such as sending a summary of a meeting's minutes to all participants. - """ - - tools: Optional[List[BaseTool]] = None - tool_executor: Optional[ToolExecutor] = None - function_model: ChatOpenAI = None - - def __init__( - self, - **kwargs, - ): - super().__init__( - **kwargs, - ) - self.tools = [ - WebSearchTool(), - ImageGeneratorTool(), - URLReaderTool(), - EmailSenderTool(user_email=self.user_email), - ] - self.tool_executor = ToolExecutor(tools=self.tools) - - def calculate_pricing(self): - return 3 - - def should_continue(self, state): - messages = state["messages"] - last_message = messages[-1] - # Make sure there is a previous message - - if last_message.tool_calls: - name = last_message.tool_calls[0]["name"] - if name == "image-generator": - return "final" - # If there is no function call, then we finish - if not last_message.tool_calls: - return "end" - # Otherwise if there is, we check if it's supposed to return directly - else: - return "continue" - - # Define the function that calls the model - def call_model(self, state): - messages = state["messages"] - response = self.function_model.invoke(messages) - # We return a list, because this will get added to the existing list - return {"messages": [response]} - - # Define the function to execute tools - def call_tool(self, state): - messages = state["messages"] - # Based on the continue condition - # we know the last message involves a function call - last_message = messages[-1] - # We construct a ToolInvocation from the function_call - tool_call = last_message.tool_calls[0] - tool_name = tool_call["name"] - arguments = tool_call["args"] - - action = ToolInvocation( - tool=tool_call["name"], - tool_input=tool_call["args"], - ) - # We call the tool_executor and get back a response - response = self.tool_executor.invoke(action) - # We use the response to create a FunctionMessage - function_message = ToolMessage( - content=str(response), name=action.tool, tool_call_id=tool_call["id"] - ) - # We return a list, because this will get added to the existing list - return {"messages": [function_message]} - - def create_graph(self): - # Define a new graph - workflow = StateGraph(AgentState) - - # Define the nodes we will cycle between - workflow.add_node("agent", self.call_model) - workflow.add_node("action", self.call_tool) - workflow.add_node("final", self.call_tool) - - # Set the entrypoint as `agent` - # This means that this node is the first one called - workflow.set_entry_point("agent") - - # We now add a conditional edge - workflow.add_conditional_edges( - # First, we define the start node. We use `agent`. - # This means these are the edges taken after the `agent` node is called.
- "agent", - # Next, we pass in the function that will determine which node is called next. - self.should_continue, - # Finally we pass in a mapping. - # The keys are strings, and the values are other nodes. - # END is a special node marking that the graph should finish. - # What will happen is we will call `should_continue`, and then the output of that - # will be matched against the keys in this mapping. - # Based on which one it matches, that node will then be called. - { - # If `tools`, then we call the tool node. - "continue": "action", - # Final call - "final": "final", - # Otherwise we finish. - "end": END, - }, - ) - - # We now add a normal edge from `tools` to `agent`. - # This means that after `tools` is called, `agent` node is called next. - workflow.add_edge("action", "agent") - workflow.add_edge("final", END) - - # Finally, we compile it! - # This compiles it into a LangChain Runnable, - # meaning you can use it as you would any other runnable - app = workflow.compile() - return app - - def get_chain(self): - self.function_model = ChatOpenAI(model="gpt-4o", temperature=0, streaming=True) - - self.function_model = self.function_model.bind_tools(self.tools) - - graph = self.create_graph() - - return graph - - async def generate_stream( - self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True - ) -> AsyncIterable: - conversational_qa_chain = self.get_chain() - transformed_history, streamed_chat_history = ( - self.initialize_streamed_chat_history(chat_id, question) - ) - filtered_history = self.filter_history(transformed_history, 40, 2000) - response_tokens = [] - config = {"metadata": {"conversation_id": str(chat_id)}} - - prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "You are GPT-4 powered by Quivr. You are an assistant. {custom_personality}", - ), - MessagesPlaceholder(variable_name="chat_history"), - ("human", "{question}"), - ] - ) - prompt_formated = prompt.format_messages( - chat_history=filtered_history, - question=question.question, - custom_personality=( - self.prompt_to_use.content if self.prompt_to_use else None - ), - ) - - async for event in conversational_qa_chain.astream_events( - {"messages": prompt_formated}, - config=config, - version="v1", - ): - kind = event["event"] - if kind == "on_chat_model_stream": - content = event["data"]["chunk"].content - if content: - # Empty content in the context of OpenAI or Anthropic usually means - # that the model is asking for a tool to be invoked. 
- # So we only print non-empty content - response_tokens.append(content) - streamed_chat_history.assistant = content - yield f"data: {json.dumps(streamed_chat_history.dict())}" - elif kind == "on_tool_start": - print("--") - print( - f"Starting tool: {event['name']} with inputs: {event['data'].get('input')}" - ) - elif kind == "on_tool_end": - print(f"Done tool: {event['name']}") - print(f"Tool output was: {event['data'].get('output')}") - print("--") - elif kind == "on_chain_end": - output = event["data"]["output"] - final_output = [item for item in output if "final" in item] - if final_output: - if ( - final_output[0]["final"]["messages"][0].name - == "image-generator" - ): - final_message = final_output[0]["final"]["messages"][0].content - response_tokens.append(final_message) - streamed_chat_history.assistant = final_message - yield f"data: {json.dumps(streamed_chat_history.dict())}" - - self.save_answer(question, response_tokens, streamed_chat_history, save_answer) - - def generate_answer( - self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True - ) -> GetChatHistoryOutput: - conversational_qa_chain = self.get_chain() - transformed_history, _ = self.initialize_streamed_chat_history( - chat_id, question - ) - filtered_history = self.filter_history(transformed_history, 40, 2000) - config = {"metadata": {"conversation_id": str(chat_id)}} - - prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "You are GPT-4 powered by Quivr. You are an assistant. {custom_personality}", - ), - MessagesPlaceholder(variable_name="chat_history"), - ("human", "{question}"), - ] - ) - prompt_formated = prompt.format_messages( - chat_history=filtered_history, - question=question.question, - custom_personality=( - self.prompt_to_use.content if self.prompt_to_use else None - ), - ) - model_response = conversational_qa_chain.invoke( - {"messages": prompt_formated}, - config=config, - ) - - answer = model_response["messages"][-1].content - - return self.save_non_streaming_answer( - chat_id=chat_id, question=question, answer=answer, metadata={} - ) diff --git a/backend/core/quivr_core/api/modules/brain/integrations/GPT4/__init__.py b/backend/core/quivr_core/api/modules/brain/integrations/GPT4/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Multi_Contract/Brain.py b/backend/core/quivr_core/api/modules/brain/integrations/Multi_Contract/Brain.py deleted file mode 100644 index 8b5aafdd89ca..000000000000 --- a/backend/core/quivr_core/api/modules/brain/integrations/Multi_Contract/Brain.py +++ /dev/null @@ -1,205 +0,0 @@ -import datetime -from operator import itemgetter -from typing import List - -from langchain.prompts import HumanMessagePromptTemplate, SystemMessagePromptTemplate -from langchain_community.chat_models import ChatLiteLLM -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate, PromptTemplate -from langchain_core.pydantic_v1 import BaseModel as BaseModelV1 -from langchain_core.pydantic_v1 import Field as FieldV1 -from langchain_core.runnables import RunnableLambda, RunnablePassthrough -from langchain_openai import ChatOpenAI - -from quivr_core.api.logger import get_logger -from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA - -logger = get_logger(__name__) - - -class cited_answer(BaseModelV1): - """Answer the user question based only on the given sources, and cite the sources used.""" - - thoughts: str = 
FieldV1( - ..., - description="""Description of the thought process, based only on the given sources. - Cite the text as much as possible and give the document name it appears in. In the format: 'Doc_name states: cited_text'. Be as - procedural as possible.""", - ) - answer: str = FieldV1( - ..., - description="The answer to the user question, which is based only on the given sources.", - ) - citations: List[int] = FieldV1( - ..., - description="The integer IDs of the SPECIFIC sources which justify the answer.", - ) - followup_questions: List[str] = FieldV1( - ..., - description="Generate up to 3 follow-up questions that could be asked based on the answer given or context provided.", - ) - - -# First step is to create the Rephrasing Prompt -_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language. Keep as many details as possible from previous messages. Keep entity names and all. - -Chat History: -{chat_history} -Follow Up Input: {question} -Standalone question:""" -CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template) - -# Next is the answering prompt - -template_answer = """ -Context: -{context} - -User Question: {question} -Answer: -""" - -today_date = datetime.datetime.now().strftime("%B %d, %Y") - -system_message_template = ( - f"Your name is Quivr. You're a helpful assistant. Today's date is {today_date}." -) - -system_message_template += """ -When answering, use neat markdown. -Answer in a concise and clear manner. -Use the following pieces of context from files provided by the user to answer the user's question. -Answer in the same language as the user question. -If you don't know the answer with the context provided from the files, just say that you don't know, don't try to make up an answer. -Don't cite the source id in the answer objects, but you can use the source to answer the question. -You have access to the files to answer the user question (limited to first 20 files): -{files} - -If not None, User instruction to follow to answer: {custom_instructions} -Don't cite the source id in the answer objects, but you can use the source to answer the question. -""" - - -ANSWER_PROMPT = ChatPromptTemplate.from_messages( - [ - SystemMessagePromptTemplate.from_template(system_message_template), - HumanMessagePromptTemplate.from_template(template_answer), - ] -) - - -# How we format documents - -DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template( - template="Source: {index} \n {page_content}" -) - - -class MultiContractBrain(KnowledgeBrainQA): - """ - The MultiContractBrain class integrates advanced conversational retrieval and language model chains - to provide comprehensive and context-aware responses to user queries. - - It leverages a combination of document retrieval, question condensation, and document-based - question answering to generate responses that are informed by a wide range of knowledge sources. - """ - - def __init__( - self, - **kwargs, - ): - """ - Initializes the MultiContractBrain class with specific configurations. - - Args: - **kwargs: Arbitrary keyword arguments. - """
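The cited_answer schema above is enforced through tool calling rather than free-form prompting. A minimal sketch of that mechanism, assuming langchain-openai is installed and OPENAI_API_KEY is set; the model and field names are illustrative, not part of this codebase:

    from typing import List

    from langchain_core.pydantic_v1 import BaseModel, Field
    from langchain_openai import ChatOpenAI

    class CitedAnswer(BaseModel):
        """Answer the question using only the given sources, citing them by ID."""

        answer: str = Field(..., description="Answer based only on the sources.")
        citations: List[int] = Field(..., description="IDs of the supporting sources.")

    llm = ChatOpenAI(model="gpt-4o", temperature=0)
    # tool_choice forces the model to emit a CitedAnswer tool call on every reply
    structured_llm = llm.bind_tools([CitedAnswer], tool_choice="CitedAnswer")
    # result = structured_llm.invoke("Source 1: ...\n\nQuestion: ...")  # needs an API key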
- """ - super().__init__( - **kwargs, - ) - - def get_chain(self): - list_files_array = ( - self.knowledge_qa.knowledge_service.get_all_knowledge_in_brain( - self.brain_id - ) - ) # pyright: ignore reportPrivateUsage=none - - list_files_array = [file.file_name for file in list_files_array] - # Max first 10 files - if len(list_files_array) > 20: - list_files_array = list_files_array[:20] - - list_files = "\n".join(list_files_array) if list_files_array else "None" - - retriever_doc = self.knowledge_qa.get_retriever() - - loaded_memory = RunnablePassthrough.assign( - chat_history=RunnableLambda( - lambda x: self.filter_history(x["chat_history"]), - ), - question=lambda x: x["question"], - ) - - api_base = None - if self.brain_settings.ollama_api_base_url and self.model.startswith("ollama"): - api_base = self.brain_settings.ollama_api_base_url - - standalone_question = { - "standalone_question": { - "question": lambda x: x["question"], - "chat_history": itemgetter("chat_history"), - } - | CONDENSE_QUESTION_PROMPT - | ChatLiteLLM(temperature=0, model=self.model, api_base=api_base) - | StrOutputParser(), - } - - knowledge_qa = self.knowledge_qa - prompt_custom_user = knowledge_qa.prompt_to_use() - prompt_to_use = "None" - if prompt_custom_user: - prompt_to_use = prompt_custom_user.content - - # Now we retrieve the documents - retrieved_documents = { - "docs": itemgetter("standalone_question") | retriever_doc, - "question": lambda x: x["standalone_question"], - "custom_instructions": lambda x: prompt_to_use, - } - - final_inputs = { - "context": lambda x: self.knowledge_qa._combine_documents(x["docs"]), - "question": itemgetter("question"), - "custom_instructions": itemgetter("custom_instructions"), - "files": lambda x: list_files, - } - llm = ChatLiteLLM( - max_tokens=self.max_tokens, - model=self.model, - temperature=self.temperature, - api_base=api_base, - ) # pyright: ignore reportPrivateUsage=none - if self.model_compatible_with_function_calling(self.model): - # And finally, we do the part that returns the answers - llm_function = ChatOpenAI( - max_tokens=self.max_tokens, - model=self.model, - temperature=self.temperature, - ) - llm = llm_function.bind_tools( - [cited_answer], - tool_choice="cited_answer", - ) - - answer = { - "answer": final_inputs | ANSWER_PROMPT | llm, - "docs": itemgetter("docs"), - } - - return loaded_memory | standalone_question | retrieved_documents | answer diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Multi_Contract/__init__.py b/backend/core/quivr_core/api/modules/brain/integrations/Multi_Contract/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Notion/Brain.py b/backend/core/quivr_core/api/modules/brain/integrations/Notion/Brain.py deleted file mode 100644 index 3e7f61c4bd02..000000000000 --- a/backend/core/quivr_core/api/modules/brain/integrations/Notion/Brain.py +++ /dev/null @@ -1,25 +0,0 @@ -from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA - - -class NotionBrain(KnowledgeBrainQA): - """ - NotionBrain integrates with Notion to provide knowledge-based responses. - It leverages data stored in Notion to answer user queries. - - Attributes: - **kwargs: Arbitrary keyword arguments for KnowledgeBrainQA initialization. - """ - - def __init__( - self, - **kwargs, - ): - """ - Initializes the NotionBrain with the given arguments. - - Args: - **kwargs: Arbitrary keyword arguments. 
- """ - super().__init__( - **kwargs, - ) diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Notion/Notion_connector.py b/backend/core/quivr_core/api/modules/brain/integrations/Notion/Notion_connector.py deleted file mode 100644 index 2fca462c2c98..000000000000 --- a/backend/core/quivr_core/api/modules/brain/integrations/Notion/Notion_connector.py +++ /dev/null @@ -1,393 +0,0 @@ -import os -import tempfile -import time -from io import BytesIO -from typing import Any, List, Optional - -import requests -from fastapi import UploadFile -from pydantic import BaseModel - -from quivr_core.api.celery_config import celery -from quivr_core.api.logger import get_logger -from quivr_core.api.modules.brain.entity.integration_brain import IntegrationEntity -from quivr_core.api.modules.brain.repository.integration_brains import ( - Integration, - IntegrationBrain, -) -from quivr_core.api.modules.knowledge.dto.inputs import CreateKnowledgeProperties -from quivr_core.api.modules.knowledge.repository.knowledge_interface import ( - KnowledgeInterface, -) -from quivr_core.api.modules.knowledge.service.knowledge_service import KnowledgeService -from quivr_core.api.modules.upload.service.upload_file import upload_file_storage - -logger = get_logger(__name__) - - -class NotionPage(BaseModel): - """Represents a Notion Page object to be used in the NotionConnector class""" - - id: str - created_time: str - last_edited_time: str - archived: bool - properties: dict[str, Any] - url: str - - -class NotionSearchResponse(BaseModel): - """Represents the response from the Notion Search API""" - - results: list[dict[str, Any]] - next_cursor: Optional[str] = None - has_more: bool = False - - -class NotionConnector(IntegrationBrain, Integration): - """A class to interact with the Notion API""" - - credentials: dict[str, str] = None - integration_details: IntegrationEntity = None - brain_id: str = None - user_id: str = None - knowledge_service: KnowledgeInterface - recursive_index_enabled: bool = False - max_pages: int = 100 - - def __init__(self, brain_id: str, user_id: str): - super().__init__() - self.brain_id = brain_id - self.user_id = user_id - self._load_credentials() - self.knowledge_service = KnowledgeService() - - def _load_credentials(self) -> dict[str, str]: - """Load the Notion credentials""" - self.integration_details = self.get_integration_brain(self.brain_id) - if self.credentials is None: - logger.info("Loading Notion credentials") - self.integration_details.credentials = { - "notion_integration_token": self.integration_details.settings.get( - "notion_integration_token", "" - ) - } - self.update_integration_brain( - self.brain_id, self.user_id, self.integration_details - ) - self.credentials = self.integration_details.credentials - else: # pragma: no cover - self.credentials = self.integration_details.credentials - - def _headers(self) -> dict[str, str]: - """Get the headers for the Notion API""" - return { - "Authorization": f'Bearer {self.credentials["notion_integration_token"]}', - "Content-Type": "application/json", - "Notion-Version": "2022-06-28", - } - - def _search_notion(self, query_dict: dict[str, Any]) -> NotionSearchResponse: - """ - Search for pages from a Notion database. 
- """ - # Use self.credentials to authenticate the request - headers = self._headers() - res = requests.post( - "https://api.notion.com/v1/search", - headers=headers, - json=query_dict, - # Adjust the timeout as needed - timeout=10, - ) - res.raise_for_status() - return NotionSearchResponse(**res.json()) - - def _fetch_blocks(self, page_id: str, cursor: str | None = None) -> dict[str, Any]: - """ - Fetch the blocks of a Notion page. - """ - logger.info(f"Fetching blocks for page: {page_id}") - headers = self._headers() - query_params = None if not cursor else {"start_cursor": cursor} - res = requests.get( - f"https://api.notion.com/v1/blocks/{page_id}/children", - params=query_params, - headers=headers, - timeout=10, - ) - res.raise_for_status() - return res.json() - - def _fetch_page(self, page_id: str) -> dict[str, Any]: - """ - Fetch a Notion page. - """ - logger.info(f"Fetching page: {page_id}") - headers = self._headers() - block_url = f"https://api.notion.com/v1/pages/{page_id}" - res = requests.get( - block_url, - headers=headers, - timeout=10, - ) - try: - res.raise_for_status() - except Exception: - logger.exception(f"Error fetching page - {res.json()}") - return None - return NotionPage(**res.json()) - - def _read_blocks( - self, page_block_id: str - ) -> tuple[list[tuple[str, str]], list[str]]: - """Reads blocks for a page""" - result_lines: list[tuple[str, str]] = [] - child_pages: list[str] = [] - cursor = None - while True: - data = self._fetch_blocks(page_block_id, cursor) - - for result in data["results"]: - result_block_id = result["id"] - result_type = result["type"] - result_obj = result[result_type] - - cur_result_text_arr = [] - if "rich_text" in result_obj: - for rich_text in result_obj["rich_text"]: - # skip if doesn't have text object - if "text" in rich_text: - text = rich_text["text"]["content"] - cur_result_text_arr.append(text) - - if result["has_children"]: - if result_type == "child_page": - child_pages.append(result_block_id) - else: - logger.info(f"Entering sub-block: {result_block_id}") - subblock_result_lines, subblock_child_pages = self._read_blocks( - result_block_id - ) - logger.info(f"Finished sub-block: {result_block_id}") - result_lines.extend(subblock_result_lines) - child_pages.extend(subblock_child_pages) - - # if result_type == "child_database" and self.recursive_index_enabled: - # child_pages.extend(self._read_pages_from_database(result_block_id)) - - cur_result_text = "\n".join(cur_result_text_arr) - if cur_result_text: - result_lines.append((cur_result_text, result_block_id)) - - if data["next_cursor"] is None: - break - - cursor = data["next_cursor"] - - return result_lines, child_pages - - def _read_page_title(self, page: NotionPage) -> str: - """Extracts the title from a Notion page""" - page_title = None - for _, prop in page.properties.items(): - if prop["type"] == "title" and len(prop["title"]) > 0: - page_title = " ".join([t["plain_text"] for t in prop["title"]]).strip() - break - if page_title is None: - page_title = f"Untitled Page [{page.id}]" - page_title = "".join(e for e in page_title if e.isalnum()) - return page_title - - def _read_page_url(self, page: NotionPage) -> str: - """Extracts the URL from a Notion page""" - return page.url - - def _read_pages_from_database(self, database_id: str) -> list[str]: - """Reads pages from a Notion database""" - headers = self._headers() - res = requests.post( - f"https://api.notion.com/v1/databases/{database_id}/query", - headers=headers, - timeout=10, - ) - res.raise_for_status() - return 
[page["id"] for page in res.json()["results"]] - - def _read_page(self, page_id: str) -> tuple[str, list[str]]: - """Reads a Notion page""" - page = self._fetch_page(page_id) - if page is None: - return None, None, None, None - page_title = self._read_page_title(page) - page_content, child_pages = self._read_blocks(page_id) - page_url = self._read_page_url(page) - return page_title, page_content, child_pages, page_url - - def _filter_pages_by_time( - self, - pages: list[dict[str, Any]], - start: str, - filter_field: str = "last_edited_time", - ) -> list[NotionPage]: - filtered_pages: list[NotionPage] = [] - start_time = time.mktime( - time.strptime(start, "%Y-%m-%dT%H:%M:%S.%f%z") - ) # Convert `start` to a float - for page in pages: - compare_time = time.mktime( - time.strptime(page[filter_field], "%Y-%m-%dT%H:%M:%S.%f%z") - ) - if compare_time > start_time: # Compare `compare_time` with `start_time` - filtered_pages += [NotionPage(**page)] - return filtered_pages - - def get_all_pages(self) -> list[NotionPage]: - """ - Get all the pages from Notion. - """ - query_dict = { - "filter": {"property": "object", "value": "page"}, - "page_size": 100, - } - max_pages = self.max_pages - pages_count = 0 - while True: - search_response = self._search_notion(query_dict) - for page in search_response.results: - pages_count += 1 - if pages_count > max_pages: - break - yield NotionPage(**page) - - if search_response.has_more: - query_dict["start_cursor"] = search_response.next_cursor - else: - break - - def add_file_to_knowledge( - self, page_content: List[tuple[str, str]], page_name: str, page_url: str - ): - """ - Add a file to the knowledge base - """ - logger.info(f"Adding file to knowledge: {page_name}") - filename_with_brain_id = ( - str(self.brain_id) + "/" + str(page_name) + "_notion.txt" - ) - try: - concatened_page_content = "" - if page_content: - for content in page_content: - concatened_page_content += content[0] + "\n" - - # Create a BytesIO object from the content - content_io = BytesIO(concatened_page_content.encode("utf-8")) - - # Create a file of type UploadFile - file = UploadFile(filename=filename_with_brain_id, file=content_io) - - # Write the UploadFile content to a temporary file - with tempfile.NamedTemporaryFile(delete=False) as temp_file: - temp_file.write(file.file.read()) - temp_file_path = temp_file.name - - # Upload the temporary file to the knowledge base - response = upload_file_storage( - temp_file_path, filename_with_brain_id, "true" - ) - logger.info(f"File {response} uploaded successfully") - - # Delete the temporary file - os.remove(temp_file_path) - - knowledge_to_add = CreateKnowledgeProperties( - brain_id=self.brain_id, - file_name=page_name + "_notion.txt", - extension="txt", - integration="notion", - integration_link=page_url, - ) - - added_knowledge = self.knowledge_service.add_knowledge(knowledge_to_add) - logger.info(f"Knowledge {added_knowledge} added successfully") - - celery.send_task( - "process_file_and_notify", - kwargs={ - "file_name": filename_with_brain_id, - "file_original_name": page_name + "_notion.txt", - "brain_id": self.brain_id, - "delete_file": True, - }, - ) - except Exception: - logger.error("Error adding knowledge") - - def load(self): - """ - Get all the pages, blocks, databases from Notion into a single document per page - """ - all_pages = list(self.get_all_pages()) # Convert generator to list - documents = [] - for page in all_pages: - logger.info(f"Reading page: {page.id}") - page_title, page_content, child_pages, page_url = 
self._read_page(page.id) - document = { - "page_title": page_title, - "page_content": page_content, - "child_pages": child_pages, - "page_url": page_url, - } - documents.append(document) - self.add_file_to_knowledge(page_content, page_title, page_url) - return documents - - def poll(self): - """ - Update all the brains with the latest data from Notion - """ - integration = self.get_integration_brain(self.brain_id) - last_synced = integration.last_synced - - query_dict = { - "page_size": self.max_pages, - "sort": {"timestamp": "last_edited_time", "direction": "descending"}, - "filter": {"property": "object", "value": "page"}, - } - documents = [] - - while True: - db_res = self._search_notion(query_dict) - pages = self._filter_pages_by_time( - db_res.results, last_synced, filter_field="last_edited_time" - ) - for page in pages: - logger.info(f"Reading page: {page.id}") - page_title, page_content, child_pages, page_url = self._read_page( - page.id - ) - document = { - "page_title": page_title, - "page_content": page_content, - "child_pages": child_pages, - "page_url": page_url, - } - documents.append(document) - self.add_file_to_knowledge(page_content, page_title, page_url) - if not db_res.has_more: - break - query_dict["start_cursor"] = db_res.next_cursor - logger.info( - f"last Synced: {self.update_last_synced(self.brain_id, self.user_id)}" - ) - return documents - - -if __name__ == "__main__": - notion = NotionConnector( - brain_id="73f7d092-d596-4fd0-b24f-24031e9b53cd", - user_id="39418e3b-0258-4452-af60-7acfcc1263ff", - ) - - print(notion.poll()) diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Notion/__init__.py b/backend/core/quivr_core/api/modules/brain/integrations/Notion/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Proxy/Brain.py b/backend/core/quivr_core/api/modules/brain/integrations/Proxy/Brain.py deleted file mode 100644 index a121b7596683..000000000000 --- a/backend/core/quivr_core/api/modules/brain/integrations/Proxy/Brain.py +++ /dev/null @@ -1,135 +0,0 @@ -import json -from typing import AsyncIterable -from uuid import UUID - -from langchain_community.chat_models import ChatLiteLLM -from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder - -from quivr_core.api.logger import get_logger -from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA -from quivr_core.api.modules.chat.dto.chats import ChatQuestion -from quivr_core.api.modules.chat.dto.outputs import GetChatHistoryOutput -from quivr_core.api.modules.chat.service.chat_service import ChatService -from quivr_core.api.modules.dependencies import get_service - -logger = get_logger(__name__) - -chat_service = get_service(ChatService)() - - -class ProxyBrain(KnowledgeBrainQA): - """ - ProxyBrain class serves as a proxy to utilize various language models for generating responses. - It dynamically selects and uses the appropriate language model based on the provided context and question. - """ - - def __init__( - self, - **kwargs, - ): - """ - Initializes the ProxyBrain with the given arguments. - - Args: - **kwargs: Arbitrary keyword arguments. - """ - super().__init__( - **kwargs, - ) - - def get_chain(self): - """ - Constructs and returns the conversational chain for ProxyBrain. - - Returns: - A conversational chain object. - """ - prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "You are Quivr. You are an assistant. 
{custom_personality}", - ), - MessagesPlaceholder(variable_name="chat_history"), - ("human", "{question}"), - ] - ) - - chain = prompt | ChatLiteLLM(model=self.model, max_tokens=self.max_tokens) - - return chain - - async def generate_stream( - self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True - ) -> AsyncIterable: - """ - Generates a stream of responses for the given question. - - Args: - chat_id (UUID): The chat session ID. - question (ChatQuestion): The question object. - save_answer (bool): Whether to save the answer. - - Yields: - AsyncIterable: A stream of response strings. - """ - conversational_qa_chain = self.get_chain() - transformed_history, streamed_chat_history = ( - self.initialize_streamed_chat_history(chat_id, question) - ) - response_tokens = [] - config = {"metadata": {"conversation_id": str(chat_id)}} - - async for chunk in conversational_qa_chain.astream( - { - "question": question.question, - "chat_history": transformed_history, - "custom_personality": ( - self.prompt_to_use.content if self.prompt_to_use else None - ), - }, - config=config, - ): - response_tokens.append(chunk.content) - streamed_chat_history.assistant = chunk.content - yield f"data: {json.dumps(streamed_chat_history.dict())}" - - self.save_answer(question, response_tokens, streamed_chat_history, save_answer) - - def generate_answer( - self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True - ) -> GetChatHistoryOutput: - """ - Generates a non-streaming answer for the given question. - - Args: - chat_id (UUID): The chat session ID. - question (ChatQuestion): The question object. - save_answer (bool): Whether to save the answer. - - Returns: - GetChatHistoryOutput: The chat history output object containing the answer. - """ - conversational_qa_chain = self.get_chain() - transformed_history, streamed_chat_history = ( - self.initialize_streamed_chat_history(chat_id, question) - ) - config = {"metadata": {"conversation_id": str(chat_id)}} - model_response = conversational_qa_chain.invoke( - { - "question": question.question, - "chat_history": transformed_history, - "custom_personality": ( - self.prompt_to_use.content if self.prompt_to_use else None - ), - }, - config=config, - ) - - answer = model_response.content - - return self.save_non_streaming_answer( - chat_id=chat_id, - question=question, - answer=answer, - ) diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Proxy/__init__.py b/backend/core/quivr_core/api/modules/brain/integrations/Proxy/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/backend/core/quivr_core/api/modules/brain/integrations/SQL/Brain.py b/backend/core/quivr_core/api/modules/brain/integrations/SQL/Brain.py deleted file mode 100644 index 9559c658665b..000000000000 --- a/backend/core/quivr_core/api/modules/brain/integrations/SQL/Brain.py +++ /dev/null @@ -1,104 +0,0 @@ -import json -from typing import AsyncIterable -from uuid import UUID - -from langchain_community.chat_models import ChatLiteLLM -from langchain_community.utilities import SQLDatabase -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.runnables import RunnablePassthrough - -from quivr_core.api.modules.brain.integrations.SQL.SQL_connector import SQLConnector -from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA -from quivr_core.api.modules.brain.repository.integration_brains import IntegrationBrain -from 
quivr_core.api.modules.chat.dto.chats import ChatQuestion - - -class SQLBrain(KnowledgeBrainQA, IntegrationBrain): - """This is the SQL brain class. It is a KnowledgeBrainQA, as the data is stored locally. - It is going to call the Data Store internally to get the data. - - Args: - KnowledgeBrainQA (_type_): A brain that stores the knowledge internally - """ - - uri: str = None - db: SQLDatabase = None - sql_connector: SQLConnector = None - - def __init__( - self, - **kwargs, - ): - super().__init__( - **kwargs, - ) - self.sql_connector = SQLConnector(self.brain_id, self.user_id) - - def get_schema(self, _): - return self.db.get_table_info() - - def run_query(self, query): - return self.db.run(query) - - def get_chain(self): - template = """Based on the table schema below, write a SQL query that would answer the user's question: - {schema} - - Question: {question} - SQL Query:""" - prompt = ChatPromptTemplate.from_template(template) - - self.db = SQLDatabase.from_uri(self.sql_connector.credentials["uri"]) - - api_base = None - if self.brain_settings.ollama_api_base_url and self.model.startswith("ollama"): - api_base = self.brain_settings.ollama_api_base_url - - model = ChatLiteLLM(model=self.model, api_base=api_base) - - sql_response = ( - RunnablePassthrough.assign(schema=self.get_schema) - | prompt - | model.bind(stop=["\nSQLResult:"]) - | StrOutputParser() - ) - - template = """Based on the table schema below, question, sql query, and sql response, write a natural language response and the query that was used to generate it: - {schema} - - Question: {question} - SQL Query: {query} - SQL Response: {response}""" - prompt_response = ChatPromptTemplate.from_template(template) - - full_chain = ( - RunnablePassthrough.assign(query=sql_response).assign( - schema=self.get_schema, - response=lambda x: self.db.run(x["query"]), - ) - | prompt_response - | model - ) - - return full_chain - - async def generate_stream( - self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True - ) -> AsyncIterable: - conversational_qa_chain = self.get_chain() - transformed_history, streamed_chat_history = ( - self.initialize_streamed_chat_history(chat_id, question) - ) - response_tokens = [] - - async for chunk in conversational_qa_chain.astream( - { - "question": question.question, - } - ): - response_tokens.append(chunk.content) - streamed_chat_history.assistant = chunk.content - yield f"data: {json.dumps(streamed_chat_history.dict())}" - - self.save_answer(question, response_tokens, streamed_chat_history, save_answer) diff --git a/backend/core/quivr_core/api/modules/brain/integrations/SQL/SQL_connector.py b/backend/core/quivr_core/api/modules/brain/integrations/SQL/SQL_connector.py deleted file mode 100644 index 671cee947b64..000000000000 --- a/backend/core/quivr_core/api/modules/brain/integrations/SQL/SQL_connector.py +++ /dev/null @@ -1,41 +0,0 @@ -from quivr_core.api.logger import get_logger -from quivr_core.api.modules.brain.entity.integration_brain import IntegrationEntity -from quivr_core.api.modules.brain.repository.integration_brains import IntegrationBrain -from quivr_core.api.modules.knowledge.repository.knowledge_interface import ( - KnowledgeInterface, -) -from quivr_core.api.modules.knowledge.service.knowledge_service import KnowledgeService - -logger = get_logger(__name__) - - -class SQLConnector(IntegrationBrain): - """A class to interact with an SQL database""" - - credentials: dict[str, str] = None - integration_details: IntegrationEntity = None - brain_id: str = None - user_id: str = None
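The two-stage shape of get_chain above (question -> SQL, then question + SQL + result -> prose) can be exercised without a database or an LLM. A sketch with stand-ins; every name below is illustrative:

    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.runnables import RunnableLambda, RunnablePassthrough

    write_sql = ChatPromptTemplate.from_template(
        "Schema: {schema}\nQuestion: {question}\nSQL Query:"
    )
    fake_llm = RunnableLambda(lambda _prompt: "SELECT count(*) FROM users;")

    def fake_db_run(query: str) -> str:
        return "42"  # stands in for SQLDatabase.run

    sql_chain = (
        RunnablePassthrough.assign(schema=lambda _: "users(id, name)")
        | write_sql
        | fake_llm
    )
    full_chain = RunnablePassthrough.assign(query=sql_chain).assign(
        response=lambda x: fake_db_run(x["query"])
    )
    print(full_chain.invoke({"question": "how many users?"}))
    # -> {'question': 'how many users?', 'query': 'SELECT count(*) FROM users;', 'response': '42'}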
- knowledge_service: KnowledgeInterface - - def __init__(self, brain_id: str, user_id: str): - super().__init__() - self.brain_id = brain_id - self.user_id = user_id - self._load_credentials() - self.knowledge_service = KnowledgeService() - - def _load_credentials(self) -> dict[str, str]: - """Load the SQL credentials""" - self.integration_details = self.get_integration_brain(self.brain_id) - if self.credentials is None: - logger.info("Loading SQL credentials") - self.integration_details.credentials = { - "uri": self.integration_details.settings.get("uri", "") - } - self.update_integration_brain( - self.brain_id, self.user_id, self.integration_details - ) - self.credentials = self.integration_details.credentials - else: # pragma: no cover - self.credentials = self.integration_details.credentials diff --git a/backend/core/quivr_core/api/modules/brain/integrations/SQL/__init__.py b/backend/core/quivr_core/api/modules/brain/integrations/SQL/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Self/Brain.py b/backend/core/quivr_core/api/modules/brain/integrations/Self/Brain.py deleted file mode 100644 index e0fed77d68ba..000000000000 --- a/backend/core/quivr_core/api/modules/brain/integrations/Self/Brain.py +++ /dev/null @@ -1,487 +0,0 @@ -import json -from typing import AsyncIterable, List -from uuid import UUID - -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ( - ChatPromptTemplate, - MessagesPlaceholder, - PromptTemplate, -) -from langchain_core.pydantic_v1 import BaseModel as BaseModelV1 -from langchain_core.pydantic_v1 import Field as FieldV1 -from langchain_openai import ChatOpenAI -from langgraph.graph import END, StateGraph -from typing_extensions import TypedDict - -from quivr_core.api.logger import get_logger -from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA -from quivr_core.api.modules.chat.dto.chats import ChatQuestion -from quivr_core.api.modules.chat.dto.outputs import GetChatHistoryOutput -from quivr_core.api.modules.chat.service.chat_service import ChatService -from quivr_core.api.modules.dependencies import get_service - - -# Post-processing -def format_docs(docs): - return "\n\n".join(doc.page_content for doc in docs) - - -class GraphState(TypedDict): - """ - Represents the state of our graph. - - Attributes: - question: question - generation: LLM generation - documents: list of documents - """ - - question: str - generation: str - documents: List[str] - - -# Data model -class GradeDocuments(BaseModelV1): - """Binary score for relevance check on retrieved documents.""" - - binary_score: str = FieldV1( - description="Documents are relevant to the question, 'yes' or 'no'" - ) - - -class GradeHallucinations(BaseModelV1): - """Binary score for hallucination present in generation answer.""" - - binary_score: str = FieldV1( - description="Answer is grounded in the facts, 'yes' or 'no'" - ) - - -# Data model
-class GradeAnswer(BaseModelV1): - """Binary score to assess answer addresses question.""" - - binary_score: str = FieldV1( - description="Answer addresses the question, 'yes' or 'no'" - ) - - -logger = get_logger(__name__) - -chat_service = get_service(ChatService)() - - -class SelfBrain(KnowledgeBrainQA): - """ - SelfBrain implements a self-reflective RAG workflow on top of KnowledgeBrainQA: - retrieved documents are graded for relevance before generation, generations are - checked for hallucination and usefulness, and the question is rewritten and - retried whenever retrieval comes back empty. - """ - - max_input: int = 10000 - - def __init__( - self, - **kwargs, - ): - super().__init__( - **kwargs, - ) - - def calculate_pricing(self): - return 3 - - def retrieval_grade(self): - llm = ChatOpenAI(model="gpt-4o", temperature=0) - structured_llm_grader = llm.with_structured_output(GradeDocuments) - - # Prompt - system = """You are a grader assessing relevance of a retrieved document to a user question. \n - It does not need to be a stringent test. The goal is to filter out erroneous retrievals. \n - If the document contains keyword(s) or semantic meaning related to the user question, grade it as relevant. \n - Give a binary 'yes' or 'no' score to indicate whether the document is relevant to the question.""" - grade_prompt = ChatPromptTemplate.from_messages( - [ - ("system", system), - ( - "human", - "Retrieved document: \n\n {document} \n\n User question: {question}", - ), - ] - ) - - retrieval_grader = grade_prompt | structured_llm_grader - - return retrieval_grader - - def generation_rag(self): - # Prompt - human_prompt = """You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise. - - Question: {question} - - Context: {context} - - Answer: - """ - prompt_human = PromptTemplate.from_template(human_prompt) - # LLM - llm = ChatOpenAI(model="gpt-4o", temperature=0) - - # Chain - rag_chain = prompt_human | llm | StrOutputParser() - - return rag_chain - - def hallucination_grader(self): - # LLM with function call - llm = ChatOpenAI(model="gpt-4o", temperature=0) - structured_llm_grader = llm.with_structured_output(GradeHallucinations) - - # Prompt - system = """You are a grader assessing whether an LLM generation is grounded in / supported by a set of retrieved facts. \n - Give a binary score 'yes' or 'no'. 'Yes' means that the answer is grounded in / supported by the set of facts.""" - hallucination_prompt = ChatPromptTemplate.from_messages( - [ - ("system", system), - ( - "human", - "Set of facts: \n\n {documents} \n\n LLM generation: {generation}", - ), - ] - ) - - hallucination_grader = hallucination_prompt | structured_llm_grader - - return hallucination_grader - - def answer_grader(self): - # LLM with function call - llm = ChatOpenAI(model="gpt-4o", temperature=0) - structured_llm_grader = llm.with_structured_output(GradeAnswer) - - # Prompt - system = """You are a grader assessing whether an answer addresses / resolves a question \n - Give a binary score 'yes' or 'no'. 'Yes' means that the answer resolves the question."""
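Each grader above leans on with_structured_output, which converts a pydantic schema into a forced function call and parses the reply back into an instance of that schema. A minimal sketch, assuming langchain-openai is installed and OPENAI_API_KEY is set (names illustrative):

    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.pydantic_v1 import BaseModel, Field
    from langchain_openai import ChatOpenAI

    class Grade(BaseModel):
        """Binary relevance verdict."""

        binary_score: str = Field(description="'yes' or 'no'")

    grade_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "Grade the document for relevance to the question."),
            ("human", "Document: {document}\nQuestion: {question}"),
        ]
    )
    grader = grade_prompt | ChatOpenAI(model="gpt-4o", temperature=0).with_structured_output(Grade)
    # verdict = grader.invoke({"document": "...", "question": "..."})  # -> Grade(binary_score=...)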
- answer_prompt = ChatPromptTemplate.from_messages( - [ - ("system", system), - ( - "human", - "User question: \n\n {question} \n\n LLM generation: {generation}", - ), - ] - ) - - answer_grader = answer_prompt | structured_llm_grader - - return answer_grader - - def question_rewriter(self): - # LLM - llm = ChatOpenAI(model="gpt-4o", temperature=0) - - # Prompt - system = """You are a question re-writer that converts an input question to a better version that is optimized \n - for vectorstore retrieval. Look at the input and try to reason about the underlying semantic intent / meaning.""" - re_write_prompt = ChatPromptTemplate.from_messages( - [ - ("system", system), - ( - "human", - "Here is the initial question: \n\n {question} \n Formulate an improved question.", - ), - ] - ) - - question_rewriter = re_write_prompt | llm | StrOutputParser() - - return question_rewriter - - def get_chain(self): - graph = self.create_graph() - - return graph - - def create_graph(self): - workflow = StateGraph(GraphState) - - # Define the nodes - workflow.add_node("retrieve", self.retrieve) # retrieve - workflow.add_node("grade_documents", self.grade_documents) # grade documents - workflow.add_node("generate", self.generate) # generate - workflow.add_node("transform_query", self.transform_query) # transform_query - - # Build graph - workflow.set_entry_point("retrieve") - workflow.add_edge("retrieve", "grade_documents") - workflow.add_conditional_edges( - "grade_documents", - self.decide_to_generate, - { - "transform_query": "transform_query", - "generate": "generate", - }, - ) - workflow.add_edge("transform_query", "retrieve") - workflow.add_conditional_edges( - "generate", - self.grade_generation_v_documents_and_question, - { - "not supported": "generate", - "useful": END, - "not useful": "transform_query", - }, - ) - - # Compile - app = workflow.compile() - return app - - def retrieve(self, state): - """ - Retrieve documents - - Args: - state (dict): The current graph state - - Returns: - state (dict): New key added to state, documents, that contains retrieved documents - """ - print("---RETRIEVE---") - logger.info("Retrieving documents") - question = state["question"] - logger.info(f"Question: {question}") - - # Retrieval - retriever = self.knowledge_qa.get_retriever() - documents = retriever.get_relevant_documents(question) - return {"documents": documents, "question": question} - - def generate(self, state): - """ - Generate answer - - Args: - state (dict): The current graph state - - Returns: - state (dict): New key added to state, generation, that contains LLM generation - """ - print("---GENERATE---") - question = state["question"] - documents = state["documents"] - - formatted_docs = format_docs(documents) - # RAG generation - generation = self.generation_rag().invoke( - {"context": formatted_docs, "question": question} - ) - return {"documents": documents, "question": question, "generation": generation} - - def grade_documents(self, state): - """ - Determines whether the retrieved documents are relevant to the question. - - Args: - state (dict): The current graph state - - Returns: - state (dict): Updates documents key with only filtered relevant documents - """
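Stripped of LangGraph, the control flow that create_graph wires together above is a short loop: grade what was retrieved, rewrite the question when nothing survives, and only return an answer that grades as useful. A pure-Python sketch (all callables are illustrative stand-ins; the hallucination retry is collapsed into the answer grade):

    def self_rag(question, retrieve, grade_doc, generate, grade_answer, rewrite, max_rounds=3):
        """Loop retrieve -> grade -> generate, rewriting the question on failure."""
        for _ in range(max_rounds):
            docs = [d for d in retrieve(question) if grade_doc(question, d) == "yes"]
            if not docs:
                question = rewrite(question)  # transform_query, then retry retrieval
                continue
            answer = generate(question, docs)
            if grade_answer(question, docs, answer) == "yes":
                return answer  # "useful": grounded and on-topic
            question = rewrite(question)  # "not useful": try a sharper question
        return None

    print(self_rag(
        "q",
        retrieve=lambda q: ["doc"],
        grade_doc=lambda q, d: "yes",
        generate=lambda q, docs: "answer",
        grade_answer=lambda q, docs, a: "yes",
        rewrite=lambda q: q,
    ))  # -> "answer"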
- - def grade_documents(self, state): - """ - Determines whether the retrieved documents are relevant to the question. - - Args: - state (dict): The current graph state - - Returns: - state (dict): Updates documents key with only filtered relevant documents - """ - - print("---CHECK DOCUMENT RELEVANCE TO QUESTION---") - question = state["question"] - documents = state["documents"] - - # Score each doc - filtered_docs = [] - for d in documents: - score = self.retrieval_grade().invoke( - {"question": question, "document": d.page_content} - ) - grade = score.binary_score - if grade == "yes": - print("---GRADE: DOCUMENT RELEVANT---") - filtered_docs.append(d) - else: - print("---GRADE: DOCUMENT NOT RELEVANT---") - continue - return {"documents": filtered_docs, "question": question} - - def transform_query(self, state): - """ - Transform the query to produce a better question. - - Args: - state (dict): The current graph state - - Returns: - state (dict): Updates question key with a re-phrased question - """ - - print("---TRANSFORM QUERY---") - question = state["question"] - documents = state["documents"] - - # Re-write question - better_question = self.question_rewriter().invoke({"question": question}) - return {"documents": documents, "question": better_question} - - def decide_to_generate(self, state): - """ - Determines whether to generate an answer, or re-generate a question. - - Args: - state (dict): The current graph state - - Returns: - str: Binary decision for next node to call - """ - - print("---ASSESS GRADED DOCUMENTS---") - question = state["question"] - filtered_documents = state["documents"] - - if not filtered_documents: - # All documents were filtered out by the relevance check, - # so we will re-generate a new query - print( - "---DECISION: ALL DOCUMENTS ARE NOT RELEVANT TO QUESTION, TRANSFORM QUERY---" - ) - return "transform_query" - else: - # We have relevant documents, so generate answer - print("---DECISION: GENERATE---") - return "generate" - - def grade_generation_v_documents_and_question(self, state): - """ - Determines whether the generation is grounded in the documents and answers the question. - - Args: - state (dict): The current graph state - - Returns: - str: Decision for next node to call - """ - - print("---CHECK HALLUCINATIONS---") - question = state["question"] - documents = state["documents"] - generation = state["generation"] - - score = self.hallucination_grader().invoke( - {"documents": documents, "generation": generation} - ) - grade = score.binary_score - - # Check hallucination - if grade == "yes": - print("---DECISION: GENERATION IS GROUNDED IN DOCUMENTS---") - # Check question-answering - print("---GRADE GENERATION vs QUESTION---") - score = self.answer_grader().invoke( - {"question": question, "generation": generation} - ) - grade = score.binary_score - if grade == "yes": - print("---DECISION: GENERATION ADDRESSES QUESTION---") - return "useful" - else: - print("---DECISION: GENERATION DOES NOT ADDRESS QUESTION---") - return "not useful" - else: - print("---DECISION: GENERATION IS NOT GROUNDED IN DOCUMENTS, RE-TRY---") - return "not supported" - - async def generate_stream( - self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True - ) -> AsyncIterable: - conversational_qa_chain = self.get_chain() - transformed_history, streamed_chat_history = ( - self.initialize_streamed_chat_history(chat_id, question) - ) - filtered_history = self.filter_history(transformed_history, 40, 2000) - response_tokens = [] - config = {"metadata": {"conversation_id": str(chat_id)}} - - prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "You are GPT-4 powered by Quivr. 
You are an assistant. {custom_personality}", - ), - MessagesPlaceholder(variable_name="chat_history"), - ("human", "{question}"), - ] - ) - prompt_formated = prompt.format_messages( - chat_history=filtered_history, - question=question.question, - custom_personality=( - self.prompt_to_use.content if self.prompt_to_use else None - ), - ) - - async for event in conversational_qa_chain.astream( - {"question": question.question}, config=config - ): - for key, value in event.items(): - if "generation" in value and value["generation"] != "": - response_tokens.append(value["generation"]) - streamed_chat_history.assistant = value["generation"] - - yield f"data: {json.dumps(streamed_chat_history.dict())}" - - self.save_answer(question, response_tokens, streamed_chat_history, save_answer) - - def generate_answer( - self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True - ) -> GetChatHistoryOutput: - conversational_qa_chain = self.get_chain() - transformed_history, _ = self.initialize_streamed_chat_history( - chat_id, question - ) - filtered_history = self.filter_history(transformed_history, 40, 2000) - config = {"metadata": {"conversation_id": str(chat_id)}} - - prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - "You are GPT-4 powered by Quivr. You are an assistant. {custom_personality}", - ), - MessagesPlaceholder(variable_name="chat_history"), - ("human", "{question}"), - ] - ) - prompt_formated = prompt.format_messages( - chat_history=filtered_history, - question=question.question, - custom_personality=( - self.prompt_to_use.content if self.prompt_to_use else None - ), - ) - model_response = conversational_qa_chain.invoke( - {"messages": prompt_formated}, - config=config, - ) - - answer = model_response["messages"][-1].content - - return self.save_non_streaming_answer( - chat_id=chat_id, question=question, answer=answer, metadata={} - ) diff --git a/backend/core/quivr_core/api/modules/brain/integrations/Self/__init__.py b/backend/core/quivr_core/api/modules/brain/integrations/Self/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/backend/core/quivr_core/api/modules/brain/integrations/__init__.py b/backend/core/quivr_core/api/modules/brain/integrations/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 From 9cf9b009659801defadec72ddb265d60717fc2d0 Mon Sep 17 00:00:00 2001 From: aminediro Date: Fri, 28 Jun 2024 14:48:54 +0200 Subject: [PATCH 07/20] core brain cleanup --- .../modules/brain/controller/brain_routes.py | 10 - .../composite_brain_connection_entity.py | 8 - .../api/modules/brain/knowledge_brain_qa.py | 513 ------------------ .../api/modules/brain/rags/__init__.py | 0 .../api/modules/brain/rags/quivr_rag.py | 377 ------------- .../api/modules/brain/rags/rag_interface.py | 31 -- .../brain/repository/api_brain_definitions.py | 7 +- .../api/modules/brain/repository/brains.py | 5 +- .../modules/brain/repository/brains_users.py | 5 +- .../brain/repository/brains_vectors.py | 5 +- .../composite_brains_connections.py | 63 --- .../brain/repository/external_api_secrets.py | 5 +- .../brain/repository/integration_brains.py | 21 +- .../brain/repository/interfaces/__init__.py | 10 - .../api_brain_definitions_interface.py | 38 -- .../repository/interfaces/brains_interface.py | 61 --- .../interfaces/brains_users_interface.py | 95 ---- .../interfaces/brains_vectors_interface.py | 41 -- .../composite_brains_connections_interface.py | 40 -- .../external_api_secrets_interface.py | 29 - .../integration_brains_interface.py | 63 
--- .../service/api_brain_definition_service.py | 4 - .../service/brain_authorization_service.py | 78 --- .../modules/brain/service/brain_service.py | 29 +- .../service/brain_subscription/__init__.py | 2 - .../resend_invitation_email.py | 57 -- .../subscription_invitation_service.py | 106 ---- .../brain/service/brain_user_service.py | 13 - .../modules/brain/service/call_brain_api.py | 11 +- .../get_question_context_from_brain.py | 4 +- .../brain/tests/test_brains_interface.py | 0 .../knowledge/controller/knowledge_routes.py | 20 +- 32 files changed, 31 insertions(+), 1720 deletions(-) delete mode 100644 backend/core/quivr_core/api/modules/brain/entity/composite_brain_connection_entity.py delete mode 100644 backend/core/quivr_core/api/modules/brain/knowledge_brain_qa.py delete mode 100644 backend/core/quivr_core/api/modules/brain/rags/__init__.py delete mode 100644 backend/core/quivr_core/api/modules/brain/rags/quivr_rag.py delete mode 100644 backend/core/quivr_core/api/modules/brain/rags/rag_interface.py delete mode 100644 backend/core/quivr_core/api/modules/brain/repository/composite_brains_connections.py delete mode 100644 backend/core/quivr_core/api/modules/brain/repository/interfaces/__init__.py delete mode 100644 backend/core/quivr_core/api/modules/brain/repository/interfaces/api_brain_definitions_interface.py delete mode 100644 backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_interface.py delete mode 100644 backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_users_interface.py delete mode 100644 backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_vectors_interface.py delete mode 100644 backend/core/quivr_core/api/modules/brain/repository/interfaces/composite_brains_connections_interface.py delete mode 100644 backend/core/quivr_core/api/modules/brain/repository/interfaces/external_api_secrets_interface.py delete mode 100644 backend/core/quivr_core/api/modules/brain/repository/interfaces/integration_brains_interface.py delete mode 100644 backend/core/quivr_core/api/modules/brain/service/brain_authorization_service.py delete mode 100644 backend/core/quivr_core/api/modules/brain/service/brain_subscription/__init__.py delete mode 100644 backend/core/quivr_core/api/modules/brain/service/brain_subscription/resend_invitation_email.py delete mode 100644 backend/core/quivr_core/api/modules/brain/service/brain_subscription/subscription_invitation_service.py delete mode 100644 backend/core/quivr_core/api/modules/brain/tests/test_brains_interface.py diff --git a/backend/core/quivr_core/api/modules/brain/controller/brain_routes.py b/backend/core/quivr_core/api/modules/brain/controller/brain_routes.py index 03757e985383..7f1d1e3e0811 100644 --- a/backend/core/quivr_core/api/modules/brain/controller/brain_routes.py +++ b/backend/core/quivr_core/api/modules/brain/controller/brain_routes.py @@ -13,9 +13,6 @@ from quivr_core.api.modules.brain.entity.integration_brain import ( IntegrationDescriptionEntity, ) -from quivr_core.api.modules.brain.service.brain_authorization_service import ( - has_brain_authorization, -) from quivr_core.api.modules.brain.service.brain_service import BrainService from quivr_core.api.modules.brain.service.brain_user_service import BrainUserService from quivr_core.api.modules.brain.service.get_question_context_from_brain import ( @@ -63,13 +60,6 @@ async def retrieve_public_brains() -> list[PublicBrain]: @brain_router.get( "/brains/{brain_id}/", - dependencies=[ - Depends( - has_brain_authorization( - 
required_roles=[RoleEnum.Owner, RoleEnum.Editor, RoleEnum.Viewer] - ) - ), - ], tags=["Brain"], ) async def retrieve_brain_by_id( diff --git a/backend/core/quivr_core/api/modules/brain/entity/composite_brain_connection_entity.py b/backend/core/quivr_core/api/modules/brain/entity/composite_brain_connection_entity.py deleted file mode 100644 index bb976112eaa5..000000000000 --- a/backend/core/quivr_core/api/modules/brain/entity/composite_brain_connection_entity.py +++ /dev/null @@ -1,8 +0,0 @@ -from uuid import UUID - -from pydantic import BaseModel - - -class CompositeBrainConnectionEntity(BaseModel): - composite_brain_id: UUID - connected_brain_id: UUID diff --git a/backend/core/quivr_core/api/modules/brain/knowledge_brain_qa.py b/backend/core/quivr_core/api/modules/brain/knowledge_brain_qa.py deleted file mode 100644 index 556677abbb4d..000000000000 --- a/backend/core/quivr_core/api/modules/brain/knowledge_brain_qa.py +++ /dev/null @@ -1,513 +0,0 @@ -import json -from typing import AsyncIterable, List, Optional -from uuid import UUID - -from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler -from pydantic import BaseModel, ConfigDict -from pydantic_settings import BaseSettings - -from quivr_core.api.logger import get_logger -from quivr_core.api.models.settings import BrainSettings -from quivr_core.api.modules.brain.entity.brain_entity import BrainEntity -from quivr_core.api.modules.brain.qa_interface import ( - QAInterface, - model_compatible_with_function_calling, -) -from quivr_core.api.modules.brain.rags.quivr_rag import QuivrRAG -from quivr_core.api.modules.brain.rags.rag_interface import RAGInterface -from quivr_core.api.modules.brain.service.brain_service import BrainService -from quivr_core.api.modules.brain.service.utils.format_chat_history import ( - format_chat_history, -) -from quivr_core.api.modules.brain.service.utils.get_prompt_to_use_id import ( - get_prompt_to_use_id, -) -from quivr_core.api.modules.chat.controller.chat.utils import ( - find_model_and_generate_metadata, - update_user_usage, -) -from quivr_core.api.modules.chat.dto.chats import ChatQuestion, Sources -from quivr_core.api.modules.chat.dto.inputs import CreateChatHistory -from quivr_core.api.modules.chat.dto.outputs import GetChatHistoryOutput -from quivr_core.api.modules.chat.service.chat_service import ChatService -from quivr_core.api.modules.prompt.service.get_prompt_to_use import get_prompt_to_use -from quivr_core.api.modules.upload.service.generate_file_signed_url import ( - generate_file_signed_url, -) -from quivr_core.api.modules.user.service.user_usage import UserUsage - -logger = get_logger(__name__) -QUIVR_DEFAULT_PROMPT = "Your name is Quivr. You're a helpful assistant. If you don't know the answer, just say that you don't know, don't try to make up an answer." 
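As an aside, the `yield f"data: {json.dumps(...)}"` lines produced by the generate_stream implementations in this patch follow the Server-Sent Events convention. A minimal client-side sketch of consuming such a stream (the endpoint URL, request payload, and delta semantics are assumptions made for illustration, not defined by this patch):

    import json

    import requests  # any HTTP client that can stream lines works here

    def consume_chat_stream(url: str, question: str) -> str:
        """Reassemble an answer from `data: {...}` lines."""
        answer = ""
        with requests.post(url, json={"question": question}, stream=True) as resp:
            resp.raise_for_status()
            for line in resp.iter_lines(decode_unicode=True):
                if not line or not line.startswith("data: "):
                    continue  # skip blank separators and non-data lines
                chunk = json.loads(line[len("data: "):])
                # `assistant` may carry an incremental delta or the full text so
                # far, depending on the brain; incremental deltas are assumed here.
                answer += chunk.get("assistant") or ""
        return answer

Note that some implementations (e.g. the Self brain above) assign the full generation to `assistant` on every chunk; a client for those should keep the last value rather than concatenate.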
- -brain_service = BrainService() - - -def is_valid_uuid(uuid_to_test, version=4): - try: - uuid_obj = UUID(uuid_to_test, version=version) - except ValueError: - return False - - return str(uuid_obj) == uuid_to_test - - -def generate_source( - source_documents, - brain_id: UUID, - citations: List[int] | None = None, -): - """ - Generate the sources list for the answer. - It takes in a list of source documents and citations that point to the indexes of the documents used in the answer. - """ - # Initialize an empty list for sources - sources_list: List[Sources] = [] - - # Initialize a dictionary for storing generated URLs - generated_urls = {} - - # remove duplicate sources with same name and create a list of unique sources - sources_url_cache = {} - - # Get source documents from the result, default to an empty list if not found - - # If source documents exist - if source_documents: - logger.info(f"Citations {citations}") - # Iterate over each document - for doc, index in zip(source_documents, range(len(source_documents))): - logger.info(f"Processing source document {doc.metadata['file_name']}") - if citations is not None: - if index not in citations: - logger.info(f"Skipping source document {doc.metadata['file_name']}") - continue - # Check if 'url' is in the document metadata - is_url = ( - "original_file_name" in doc.metadata - and doc.metadata["original_file_name"] is not None - and doc.metadata["original_file_name"].startswith("http") - ) - - # Determine the name based on whether it's a URL or a file - name = ( - doc.metadata["original_file_name"] - if is_url - else doc.metadata["file_name"] - ) - - # Determine the type based on whether it's a URL or a file - type_ = "url" if is_url else "file" - - # Determine the source URL based on whether it's a URL or a file - if is_url: - source_url = doc.metadata["original_file_name"] - else: - file_path = f"{brain_id}/{doc.metadata['file_name']}" - # Check if the URL has already been generated - if file_path in generated_urls: - source_url = generated_urls[file_path] - else: - # Generate the URL - if file_path in sources_url_cache: - source_url = sources_url_cache[file_path] - else: - generated_url = generate_file_signed_url(file_path) - if generated_url is not None: - source_url = generated_url.get("signedURL", "") - else: - source_url = "" - # Store the generated URL - generated_urls[file_path] = source_url - - # Append a new Sources object to the list - sources_list.append( - Sources( - name=name, - type=type_, - source_url=source_url, - original_file_name=name, - citation=doc.page_content, - ) - ) - else: - logger.info("No source documents found or source_documents is not a list.") - return sources_list - - -class KnowledgeBrainQA(BaseModel, QAInterface): - """ - Main class for the Brain Picking functionality. - It allows one to initialize a chat model, generate questions and retrieve answers using ConversationalRetrievalChain. - It has two main methods: `generate_answer` and `generate_stream`. - One is for generating answers in a single request, the other for generating answers in a streaming fashion. - Both are the same, except that the streaming version streams the last message as a stream. - Each has the same prompt template, which is defined in the `prompt_template` property. - """ - - model_config = ConfigDict(arbitrary_types_allowed=True) - - # Instantiate settings - brain_settings: BaseSettings = BrainSettings() - - # TODO: remove this !!!!! 
Only added for compatibility - chat_service: ChatService - - # Default class attributes - model: str = "gpt-3.5-turbo-0125" # pyright: ignore reportPrivateUsage=none - temperature: float = 0.1 - chat_id: str = None # pyright: ignore reportPrivateUsage=none - brain_id: str = None # pyright: ignore reportPrivateUsage=none - max_tokens: int = 2000 - max_input: int = 2000 - streaming: bool = False - knowledge_qa: Optional[RAGInterface] = None - brain: Optional[BrainEntity] = None - user_id: str = None - user_email: str = None - user_usage: Optional[UserUsage] = None - user_settings: Optional[dict] = None - models_settings: Optional[List[dict]] = None - metadata: Optional[dict] = None - - callbacks: List[AsyncIteratorCallbackHandler] = None # pyright: ignore reportPrivateUsage=none - - prompt_id: Optional[UUID] = None - - def __init__( - self, - brain_id: str, - chat_id: str, - chat_service: ChatService, - user_id: str = None, - user_email: str = None, - streaming: bool = False, - prompt_id: Optional[UUID] = None, - metadata: Optional[dict] = None, - cost: int = 100, - **kwargs, - ): - super().__init__( - brain_id=brain_id, - chat_id=chat_id, - chat_service=chat_service, - streaming=streaming, - **kwargs, - ) - self.chat_service = chat_service - self.prompt_id = prompt_id - self.user_id = user_id - self.user_email = user_email - self.user_usage = UserUsage(id=user_id, email=user_email) - # TODO: we already have a brain before !!! - self.brain = brain_service.get_brain_by_id(brain_id) - self.user_settings = self.user_usage.get_user_settings() - - # Get Model settings for the user - self.models_settings = self.user_usage.get_models() - self.increase_usage_user() - self.knowledge_qa = QuivrRAG( - model=self.brain.model if self.brain.model else self.model, - brain_id=brain_id, - chat_id=chat_id, - streaming=streaming, - max_input=self.max_input, - max_tokens=self.max_tokens, - **kwargs, - ) # type: ignore - - @property - def prompt_to_use(self): - if self.brain_id and is_valid_uuid(self.brain_id): - return get_prompt_to_use(UUID(self.brain_id), self.prompt_id) - else: - return None - - @property - def prompt_to_use_id(self) -> Optional[UUID]: - # TODO: move to prompt service or instruction or something - if self.brain_id and is_valid_uuid(self.brain_id): - return get_prompt_to_use_id(UUID(self.brain_id), self.prompt_id) - else: - return None - - def filter_history( - self, chat_history, max_history: int = 10, max_tokens: int = 2000 - ): - """ - Filter out the chat history to only include the messages that are relevant to the current question - - Takes in a chat_history= [HumanMessage(content='Qui est Chloé ? 
'), AIMessage(content="Chloé est une salariée travaillant pour l'entreprise Quivr en tant qu'AI Engineer, sous la direction de son supérieur hiérarchique, Stanislas Girard."), HumanMessage(content='Dis moi en plus sur elle'), AIMessage(content=''), HumanMessage(content='Dis moi en plus sur elle'), AIMessage(content="Désolé, je n'ai pas d'autres informations sur Chloé à partir des fichiers fournis.")] - Returns a filtered chat_history, prioritizing first max_tokens, then max_history, where a human message and an AI message count as one pair; a token is approximated as 4 characters - """ - chat_history = chat_history[::-1] - total_tokens = 0 - total_pairs = 0 - filtered_chat_history = [] - for i in range(0, len(chat_history), 2): - if i + 1 < len(chat_history): - human_message = chat_history[i] - ai_message = chat_history[i + 1] - message_tokens = ( - len(human_message.content) + len(ai_message.content) - ) // 4 - if ( - total_tokens + message_tokens > max_tokens - or total_pairs >= max_history - ): - break - filtered_chat_history.append(human_message) - filtered_chat_history.append(ai_message) - total_tokens += message_tokens - total_pairs += 1 - chat_history = filtered_chat_history[::-1] - - return chat_history - - def increase_usage_user(self): - # Raises an error if the user has consumed all of his credits - - update_user_usage( - usage=self.user_usage, - user_settings=self.user_settings, - cost=self.calculate_pricing(), - ) - - def calculate_pricing(self): - model_to_use = find_model_and_generate_metadata( - self.brain.model, - self.user_settings, - self.models_settings, - ) - self.model = model_to_use.name - self.max_input = model_to_use.max_input - self.max_tokens = model_to_use.max_output - user_chosen_model_price = 1000 - - for model_setting in self.models_settings: - if model_setting["name"] == self.model: - user_chosen_model_price = model_setting["price"] - - return user_chosen_model_price - - # TODO: deprecated - async def generate_answer( - self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True - ) -> GetChatHistoryOutput: - conversational_qa_chain = self.knowledge_qa.get_chain() - transformed_history, _ = await self.initialize_streamed_chat_history( - chat_id, question - ) - metadata = self.metadata or {} - citations = None - answer = "" - config = {"metadata": {"conversation_id": str(chat_id)}} - - model_response = conversational_qa_chain.invoke( - { - "question": question.question, - "chat_history": transformed_history, - "custom_personality": ( - self.prompt_to_use.content if self.prompt_to_use else None - ), - }, - config=config, - ) - - if model_compatible_with_function_calling(model=self.model): - if model_response["answer"].tool_calls: - citations = model_response["answer"].tool_calls[-1]["args"]["citations"] - followup_questions = model_response["answer"].tool_calls[-1]["args"][ - "followup_questions" - ] - thoughts = model_response["answer"].tool_calls[-1]["args"]["thoughts"] - if citations: - citations = citations - if followup_questions: - metadata["followup_questions"] = followup_questions - if thoughts: - metadata["thoughts"] = thoughts - answer = model_response["answer"].tool_calls[-1]["args"]["answer"] - else: - answer = model_response["answer"].content - - sources = model_response["docs"] or [] - - if len(sources) > 0: - sources_list = generate_source(sources, self.brain_id, citations=citations) - serialized_sources_list = [source.dict() for source in sources_list] - metadata["sources"] = serialized_sources_list - - return 
self.save_non_streaming_answer( - chat_id=chat_id, question=question, answer=answer, metadata=metadata - ) - - async def generate_stream( - self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True - ) -> AsyncIterable: - if hasattr(self, "get_chain") and callable(self.get_chain): - conversational_qa_chain = self.get_chain() - else: - conversational_qa_chain = self.knowledge_qa.get_chain() - ( - transformed_history, - streamed_chat_history, - ) = await self.initialize_streamed_chat_history(chat_id, question) - response_tokens = "" - sources = [] - citations = [] - first = True - config = {"metadata": {"conversation_id": str(chat_id)}} - - async for chunk in conversational_qa_chain.astream( - { - "question": question.question, - "chat_history": transformed_history, - "custom_personality": ( - self.prompt_to_use.content if self.prompt_to_use else None - ), - }, - config=config, - ): - if not streamed_chat_history.metadata: - streamed_chat_history.metadata = {} - if model_compatible_with_function_calling(model=self.model): - if chunk.get("answer"): - if first: - gathered = chunk["answer"] - first = False - else: - gathered = gathered + chunk["answer"] - if ( - gathered.tool_calls - and gathered.tool_calls[-1].get("args") - and "answer" in gathered.tool_calls[-1]["args"] - ): - # Only send the difference between answer and response_tokens which was the previous answer - answer = gathered.tool_calls[-1]["args"]["answer"] - difference = answer[len(response_tokens) :] - streamed_chat_history.assistant = difference - response_tokens = answer - - yield f"data: {json.dumps(streamed_chat_history.dict())}" - if ( - gathered.tool_calls - and gathered.tool_calls[-1].get("args") - and "citations" in gathered.tool_calls[-1]["args"] - ): - citations = gathered.tool_calls[-1]["args"]["citations"] - if ( - gathered.tool_calls - and gathered.tool_calls[-1].get("args") - and "followup_questions" in gathered.tool_calls[-1]["args"] - ): - followup_questions = gathered.tool_calls[-1]["args"][ - "followup_questions" - ] - streamed_chat_history.metadata["followup_questions"] = ( - followup_questions - ) - if ( - gathered.tool_calls - and gathered.tool_calls[-1].get("args") - and "thoughts" in gathered.tool_calls[-1]["args"] - ): - thoughts = gathered.tool_calls[-1]["args"]["thoughts"] - streamed_chat_history.metadata["thoughts"] = thoughts - else: - if chunk.get("answer"): - response_tokens += chunk["answer"].content - streamed_chat_history.assistant = chunk["answer"].content - yield f"data: {streamed_chat_history.model_dump_json()}" - - if chunk.get("docs"): - sources = chunk["docs"] - - sources_list = generate_source(sources, self.brain_id, citations) - - # Serialize the sources list - serialized_sources_list = [source.dict() for source in sources_list] - streamed_chat_history.metadata["sources"] = serialized_sources_list - yield f"data: {streamed_chat_history.model_dump_json()}" - self.save_answer(question, response_tokens, streamed_chat_history, save_answer) - - async def initialize_streamed_chat_history(self, chat_id, question): - history = await self.chat_service.get_chat_history(self.chat_id) - transformed_history = format_chat_history(history) - brain = brain_service.get_brain_by_id(self.brain_id) - - streamed_chat_history = self.chat_service.update_chat_history( - CreateChatHistory( - **{ - "chat_id": chat_id, - "user_message": question.question, - "assistant": "", - "brain_id": brain.brain_id, - "prompt_id": self.prompt_to_use_id, - } - ) - ) - - streamed_chat_history = GetChatHistoryOutput( - 
**{ - "chat_id": str(chat_id), - "message_id": streamed_chat_history.message_id, - "message_time": streamed_chat_history.message_time, - "user_message": question.question, - "assistant": "", - "prompt_title": ( - self.prompt_to_use.title if self.prompt_to_use else None - ), - "brain_name": brain.name if brain else None, - "brain_id": str(brain.brain_id) if brain else None, - "metadata": self.metadata, - } - ) - - return transformed_history, streamed_chat_history - - def save_answer( - self, question, response_tokens, streamed_chat_history, save_answer - ): - assistant = "".join(response_tokens) - - try: - if save_answer: - self.chat_service.update_message_by_id( - message_id=str(streamed_chat_history.message_id), - user_message=question.question, - assistant=assistant, - metadata=streamed_chat_history.metadata, - ) - except Exception as e: - logger.error("Error updating message by ID: %s", e) - - def save_non_streaming_answer(self, chat_id, question, answer, metadata): - new_chat = self.chat_service.update_chat_history( - CreateChatHistory( - **{ - "chat_id": chat_id, - "user_message": question.question, - "assistant": answer, - "brain_id": self.brain.brain_id, - "prompt_id": self.prompt_to_use_id, - "metadata": metadata, - } - ) - ) - - return GetChatHistoryOutput( - **{ - "chat_id": chat_id, - "user_message": question.question, - "assistant": answer, - "message_time": new_chat.message_time, - "prompt_title": ( - self.prompt_to_use.title if self.prompt_to_use else None - ), - "brain_name": self.brain.name if self.brain else None, - "message_id": new_chat.message_id, - "brain_id": str(self.brain.brain_id) if self.brain else None, - "metadata": metadata, - } - ) diff --git a/backend/core/quivr_core/api/modules/brain/rags/__init__.py b/backend/core/quivr_core/api/modules/brain/rags/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/backend/core/quivr_core/api/modules/brain/rags/quivr_rag.py b/backend/core/quivr_core/api/modules/brain/rags/quivr_rag.py deleted file mode 100644 index 817fa13a2afd..000000000000 --- a/backend/core/quivr_core/api/modules/brain/rags/quivr_rag.py +++ /dev/null @@ -1,377 +0,0 @@ -import datetime -import os -from operator import itemgetter -from typing import List, Optional -from uuid import UUID - -from langchain.chains import ConversationalRetrievalChain -from langchain.llms.base import BaseLLM -from langchain.prompts import HumanMessagePromptTemplate, SystemMessagePromptTemplate -from langchain.retrievers import ContextualCompressionRetriever -from langchain.retrievers.document_compressors import FlashrankRerank -from langchain.schema import format_document -from langchain_cohere import CohereRerank -from langchain_community.chat_models import ChatLiteLLM -from langchain_core.output_parsers import StrOutputParser -from langchain_core.prompts import ChatPromptTemplate, PromptTemplate -from langchain_core.pydantic_v1 import BaseModel as BaseModelV1 -from langchain_core.pydantic_v1 import Field as FieldV1 -from langchain_core.runnables import RunnableLambda, RunnablePassthrough -from langchain_openai import ChatOpenAI -from pydantic import BaseModel, ConfigDict -from pydantic_settings import BaseSettings -from supabase.client import Client - -from quivr_core.api.logger import get_logger - -# Importing settings related to the 'brain' -from quivr_core.api.models.settings import ( - BrainSettings, - get_embedding_client, - get_supabase_client, -) -from quivr_core.api.modules.brain.qa_interface import ( - 
model_compatible_with_function_calling, -) -from quivr_core.api.modules.brain.service.brain_service import BrainService -from quivr_core.api.modules.chat.service.chat_service import ChatService -from quivr_core.api.modules.dependencies import get_service -from quivr_core.api.modules.knowledge.repository.knowledges import KnowledgeRepository -from quivr_core.api.modules.prompt.service.get_prompt_to_use import get_prompt_to_use -from quivr_core.api.vectorstore.supabase import CustomSupabaseVectorStore - -logger = get_logger(__name__) - - -class cited_answer(BaseModelV1): - """Answer the user question based only on the given sources, and cite the sources used.""" - - thoughts: str = FieldV1( - ..., - description="""Description of the thought process, based only on the given sources. - Cite the text as much as possible and give the document name it appears in. In the format: 'Doc_name states: cited_text'. Be as - procedural as possible. Write all the steps needed to find the answer until you find it.""", - ) - answer: str = FieldV1( - ..., - description="The answer to the user question, which is based only on the given sources.", - ) - citations: List[int] = FieldV1( - ..., - description="The integer IDs of the SPECIFIC sources which justify the answer.", - ) - followup_questions: List[str] = FieldV1( - ..., - description="Generate up to 3 follow-up questions that could be asked based on the answer given or context provided.", - ) - - -# First step is to create the Rephrasing Prompt -_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language. Keep as much detail as possible from previous messages. Keep entity names and all. - -Chat History: -{chat_history} -Follow Up Input: {question} -Standalone question:""" -CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template) - -# Next is the answering prompt - -template_answer = """ -Context: -{context} - -User Question: {question} -Answer: -""" - -today_date = datetime.datetime.now().strftime("%B %d, %Y") - -system_message_template = ( - f"Your name is Quivr. You're a helpful assistant. Today's date is {today_date}." -) - -system_message_template += """ -When answering use markdown. -Use markdown code blocks for code snippets. -Answer in a concise and clear manner. -Use the following pieces of context from files provided by the user to answer the user. -Answer in the same language as the user question. -If you don't know the answer with the context provided from the files, just say that you don't know, don't try to make up an answer. -Don't cite the source id in the answer objects, but you can use the source to answer the question. -You have access to the files to answer the user question (limited to first 20 files): -{files} - -If not None, User instruction to follow to answer: {custom_instructions} 
-""" - - -ANSWER_PROMPT = ChatPromptTemplate.from_messages( - [ - SystemMessagePromptTemplate.from_template(system_message_template), - HumanMessagePromptTemplate.from_template(template_answer), - ] -) - - -# How we format documents - -DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template( - template="Source: {index} \n {page_content}" -) - - -def is_valid_uuid(uuid_to_test, version=4): - try: - uuid_obj = UUID(uuid_to_test, version=version) - except ValueError: - return False - - return str(uuid_obj) == uuid_to_test - - -brain_service = BrainService() -chat_service = get_service(ChatService)() - - -class QuivrRAG(BaseModel): - """ - Quivr implementation of the RAGInterface. - """ - - model_config = ConfigDict(arbitrary_types_allowed=True) - - # Instantiate settings - brain_settings: BaseSettings = BrainSettings() - # Default class attributes - model: str = None # pyright: ignore reportPrivateUsage=none - temperature: float = 0.1 - chat_id: str = None # pyright: ignore reportPrivateUsage=none - brain_id: str = None # pyright: ignore reportPrivateUsage=none - max_tokens: int = 2000 # Output length - max_input: int = 2000 - streaming: bool = False - knowledge_service: KnowledgeRepository = None - - def prompt_to_use(self): - if self.brain_id and is_valid_uuid(self.brain_id): - return get_prompt_to_use(UUID(self.brain_id), self.prompt_id) - else: - return None - - supabase_client: Optional[Client] = None - vector_store: Optional[CustomSupabaseVectorStore] = None - qa: Optional[ConversationalRetrievalChain] = None - prompt_id: Optional[UUID] = None - - def __init__( - self, - model: str, - brain_id: str, - chat_id: str, - streaming: bool = False, - prompt_id: Optional[UUID] = None, - max_tokens: int = 2000, - max_input: int = 2000, - **kwargs, - ): - super().__init__( - model=model, - brain_id=brain_id, - chat_id=chat_id, - streaming=streaming, - max_tokens=max_tokens, - max_input=max_input, - **kwargs, - ) - self.supabase_client = get_supabase_client() - self.vector_store = self._create_vector_store() - self.prompt_id = prompt_id - self.max_tokens = max_tokens - self.max_input = max_input - self.model = model - self.brain_id = brain_id - self.chat_id = chat_id - self.streaming = streaming - self.knowledge_service = KnowledgeRepository() - - def _create_vector_store(self) -> CustomSupabaseVectorStore: - embeddings = get_embedding_client() - return CustomSupabaseVectorStore( - self.supabase_client, - embeddings, - table_name="vectors", - brain_id=self.brain_id, - max_input=self.max_input, - ) - - def _create_llm( - self, - callbacks, - model, - streaming=False, - temperature=0, - ) -> BaseLLM: - """ - Create a LLM with the given parameters - """ - if streaming and callbacks is None: - raise ValueError( - "Callbacks must be provided when using streaming language models" - ) - - api_base = None - if self.brain_settings.ollama_api_base_url and model.startswith("ollama"): - api_base = ( - self.brain_settings.ollama_api_base_url # pyright: ignore reportPrivateUsage=none - ) - - return ChatLiteLLM( - temperature=temperature, - max_tokens=self.max_tokens, - model=model, - streaming=streaming, - verbose=False, - callbacks=callbacks, - api_base=api_base, - ) # pyright: ignore reportPrivateUsage=none - - def _combine_documents( - self, docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n" - ): - # for each docs, add an index in the metadata to be able to cite the sources - for doc, index in zip(docs, range(len(docs))): - doc.metadata["index"] = index - doc_strings = 
[format_document(doc, document_prompt) for doc in docs] - return document_separator.join(doc_strings) - - def get_retriever(self): - return self.vector_store.as_retriever() - - def filter_history( - self, chat_history, max_history: int = 10, max_tokens: int = 2000 - ): - """ - Filter out the chat history to only include the messages that are relevant to the current question - - Takes in a chat_history= [HumanMessage(content='Qui est Chloé ? '), AIMessage(content="Chloé est une salariée travaillant pour l'entreprise Quivr en tant qu'AI Engineer, sous la direction de son supérieur hiérarchique, Stanislas Girard."), HumanMessage(content='Dis moi en plus sur elle'), AIMessage(content=''), HumanMessage(content='Dis moi en plus sur elle'), AIMessage(content="Désolé, je n'ai pas d'autres informations sur Chloé à partir des fichiers fournis.")] - Returns a filtered chat_history, prioritizing first max_tokens, then max_history, where a human message and an AI message count as one pair; a token is approximated as 4 characters - """ - chat_history = chat_history[::-1] - total_tokens = 0 - total_pairs = 0 - filtered_chat_history = [] - for i in range(0, len(chat_history), 2): - if i + 1 < len(chat_history): - human_message = chat_history[i] - ai_message = chat_history[i + 1] - message_tokens = ( - len(human_message.content) + len(ai_message.content) - ) // 4 - if ( - total_tokens + message_tokens > max_tokens - or total_pairs >= max_history - ): - break - filtered_chat_history.append(human_message) - filtered_chat_history.append(ai_message) - total_tokens += message_tokens - total_pairs += 1 - chat_history = filtered_chat_history[::-1] - - return chat_history - - def get_chain(self): - list_files_array = self.knowledge_service.get_all_knowledge_in_brain( - self.brain_id - ) # pyright: ignore reportPrivateUsage=none - - list_files_array = [file.file_name or file.url for file in list_files_array] - # Keep only the first 20 files - if len(list_files_array) > 20: - list_files_array = list_files_array[:20] - - list_files = "\n".join(list_files_array) if list_files_array else "None" - - # TODO(@aminediro) : Should be a class level attribute - compressor = None - if os.getenv("COHERE_API_KEY"): - compressor = CohereRerank(top_n=20) - else: - compressor = FlashrankRerank(model="ms-marco-TinyBERT-L-2-v2", top_n=20) - - retriever_doc = self.get_retriever() - compression_retriever = ContextualCompressionRetriever( - base_compressor=compressor, base_retriever=retriever_doc - ) - - loaded_memory = RunnablePassthrough.assign( - chat_history=RunnableLambda( - lambda x: self.filter_history(x["chat_history"]), - ), - question=lambda x: x["question"], - ) - - api_base = None - if self.brain_settings.ollama_api_base_url and self.model.startswith("ollama"): - api_base = self.brain_settings.ollama_api_base_url - - standalone_question = { - "standalone_question": { - "question": lambda x: x["question"], - "chat_history": itemgetter("chat_history"), - } - | CONDENSE_QUESTION_PROMPT - | ChatLiteLLM(temperature=0, model=self.model, api_base=api_base) - | StrOutputParser(), - } - - prompt_custom_user = self.prompt_to_use() - prompt_to_use = "None" - if prompt_custom_user: - prompt_to_use = prompt_custom_user.content - - # Now we retrieve the documents - retrieved_documents = { - "docs": itemgetter("standalone_question") | compression_retriever, - "question": lambda x: x["standalone_question"], - "custom_instructions": lambda x: prompt_to_use, - } - - final_inputs = { - "context": lambda x: self._combine_documents(x["docs"]), - "question": 
itemgetter("question"), - "custom_instructions": itemgetter("custom_instructions"), - "files": lambda x: list_files, - } - llm = ChatLiteLLM( - max_tokens=self.max_tokens, - model=self.model, - temperature=self.temperature, - api_base=api_base, - ) # pyright: ignore reportPrivateUsage=none - if model_compatible_with_function_calling(self.model): - # And finally, we do the part that returns the answers - llm_function = ChatOpenAI( - max_tokens=self.max_tokens, - model=self.model, - temperature=self.temperature, - ) - llm = llm_function.bind_tools( - [cited_answer], - tool_choice="cited_answer", - ) - - answer = { - "answer": final_inputs | ANSWER_PROMPT | llm, - "docs": itemgetter("docs"), - } - - return loaded_memory | standalone_question | retrieved_documents | answer diff --git a/backend/core/quivr_core/api/modules/brain/rags/rag_interface.py b/backend/core/quivr_core/api/modules/brain/rags/rag_interface.py deleted file mode 100644 index 9364eb0dfcf3..000000000000 --- a/backend/core/quivr_core/api/modules/brain/rags/rag_interface.py +++ /dev/null @@ -1,31 +0,0 @@ -from abc import ABC, abstractmethod -from typing import List, Optional - -from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler -from langchain.chains.combine_documents.base import BaseCombineDocumentsChain -from langchain.chains.llm import LLMChain -from langchain_core.retrievers import BaseRetriever - - -class RAGInterface(ABC): - @abstractmethod - def get_doc_chain( - self, - streaming: bool, - callbacks: Optional[List[AsyncIteratorCallbackHandler]] = None, - ) -> BaseCombineDocumentsChain: - raise NotImplementedError( - "get_doc_chain is an abstract method and must be implemented" - ) - - @abstractmethod - def get_question_generation_llm(self) -> LLMChain: - raise NotImplementedError( - "get_question_generation_llm is an abstract method and must be implemented" - ) - - @abstractmethod - def get_retriever(self) -> BaseRetriever: - raise NotImplementedError( - "get_retriever is an abstract method and must be implemented" - ) diff --git a/backend/core/quivr_core/api/modules/brain/repository/api_brain_definitions.py b/backend/core/quivr_core/api/modules/brain/repository/api_brain_definitions.py index 6b4c49f8758d..47c5e313843c 100644 --- a/backend/core/quivr_core/api/modules/brain/repository/api_brain_definitions.py +++ b/backend/core/quivr_core/api/modules/brain/repository/api_brain_definitions.py @@ -6,12 +6,9 @@ from quivr_core.api.modules.brain.entity.api_brain_definition_entity import ( ApiBrainDefinitionEntity, ) -from quivr_core.api.modules.brain.repository.interfaces import ( - ApiBrainDefinitionsInterface, -) -class ApiBrainDefinitions(ApiBrainDefinitionsInterface): +class ApiBrainDefinitions: def __init__(self): self.db = get_supabase_client() @@ -21,7 +18,7 @@ def get_api_brain_definition( response = ( self.db.table("api_brain_definition") .select("*") - .filter("brain_id", "eq", brain_id) + .filter("brain_id", "eq", str(brain_id)) .execute() ) if len(response.data) == 0: diff --git a/backend/core/quivr_core/api/modules/brain/repository/brains.py b/backend/core/quivr_core/api/modules/brain/repository/brains.py index ce3c844ee0b0..e3ca02a122e5 100644 --- a/backend/core/quivr_core/api/modules/brain/repository/brains.py +++ b/backend/core/quivr_core/api/modules/brain/repository/brains.py @@ -10,14 +10,11 @@ ) from quivr_core.api.modules.brain.dto.inputs import BrainUpdatableProperties from quivr_core.api.modules.brain.entity.brain_entity import BrainEntity, PublicBrain -from 
quivr_core.api.modules.brain.repository.interfaces.brains_interface import ( - BrainsInterface, -) logger = get_logger(__name__) -class Brains(BrainsInterface): +class Brains: def __init__(self): supabase_client = get_supabase_client() self.db = supabase_client diff --git a/backend/core/quivr_core/api/modules/brain/repository/brains_users.py b/backend/core/quivr_core/api/modules/brain/repository/brains_users.py index 07410f79a4e4..a420e999bdf8 100644 --- a/backend/core/quivr_core/api/modules/brain/repository/brains_users.py +++ b/backend/core/quivr_core/api/modules/brain/repository/brains_users.py @@ -6,14 +6,11 @@ BrainUser, MinimalUserBrainEntity, ) -from quivr_core.api.modules.brain.repository.interfaces.brains_users_interface import ( - BrainsUsersInterface, -) logger = get_logger(__name__) -class BrainsUsers(BrainsUsersInterface): +class BrainsUsers: def __init__(self): supabase_client = get_supabase_client() self.db = supabase_client diff --git a/backend/core/quivr_core/api/modules/brain/repository/brains_vectors.py b/backend/core/quivr_core/api/modules/brain/repository/brains_vectors.py index 2e6ed26ee7f0..dfc551337b96 100644 --- a/backend/core/quivr_core/api/modules/brain/repository/brains_vectors.py +++ b/backend/core/quivr_core/api/modules/brain/repository/brains_vectors.py @@ -1,13 +1,10 @@ from quivr_core.api.logger import get_logger from quivr_core.api.models.settings import get_supabase_client -from quivr_core.api.modules.brain.repository.interfaces.brains_vectors_interface import ( - BrainsVectorsInterface, -) logger = get_logger(__name__) -class BrainsVectors(BrainsVectorsInterface): +class BrainsVectors: def __init__(self): supabase_client = get_supabase_client() self.db = supabase_client diff --git a/backend/core/quivr_core/api/modules/brain/repository/composite_brains_connections.py b/backend/core/quivr_core/api/modules/brain/repository/composite_brains_connections.py deleted file mode 100644 index 38d9515e54ea..000000000000 --- a/backend/core/quivr_core/api/modules/brain/repository/composite_brains_connections.py +++ /dev/null @@ -1,63 +0,0 @@ -from uuid import UUID - -from quivr_core.api.logger import get_logger -from quivr_core.api.models.settings import get_supabase_client -from quivr_core.api.modules.brain.entity.composite_brain_connection_entity import ( - CompositeBrainConnectionEntity, -) -from quivr_core.api.modules.brain.repository.interfaces import ( - CompositeBrainsConnectionsInterface, -) - -logger = get_logger(__name__) - - -class CompositeBrainsConnections(CompositeBrainsConnectionsInterface): - def __init__(self): - self.db = get_supabase_client() - - def connect_brain( - self, composite_brain_id: UUID, connected_brain_id: UUID - ) -> CompositeBrainConnectionEntity: - response = ( - self.db.table("composite_brain_connections") - .insert( - { - "composite_brain_id": str(composite_brain_id), - "connected_brain_id": str(connected_brain_id), - } - ) - .execute() - ) - - return CompositeBrainConnectionEntity(**response.data[0]) - - def get_connected_brains(self, composite_brain_id: UUID) -> list[UUID]: - response = ( - self.db.from_("composite_brain_connections") - .select("connected_brain_id") - .filter("composite_brain_id", "eq", str(composite_brain_id)) - .execute() - ) - - return [item["connected_brain_id"] for item in response.data] - - def disconnect_brain( - self, composite_brain_id: UUID, connected_brain_id: UUID - ) -> None: - self.db.table("composite_brain_connections").delete().match( - { - "composite_brain_id": composite_brain_id, - 
"connected_brain_id": connected_brain_id, - } - ).execute() - - def is_connected_brain(self, brain_id: UUID) -> bool: - response = ( - self.db.from_("composite_brain_connections") - .select("connected_brain_id") - .filter("connected_brain_id", "eq", str(brain_id)) - .execute() - ) - - return len(response.data) > 0 diff --git a/backend/core/quivr_core/api/modules/brain/repository/external_api_secrets.py b/backend/core/quivr_core/api/modules/brain/repository/external_api_secrets.py index 1eb4fa4feab4..34cede7f14d6 100644 --- a/backend/core/quivr_core/api/modules/brain/repository/external_api_secrets.py +++ b/backend/core/quivr_core/api/modules/brain/repository/external_api_secrets.py @@ -1,16 +1,13 @@ from uuid import UUID from quivr_core.api.models.settings import get_supabase_client -from quivr_core.api.modules.brain.repository.interfaces.external_api_secrets_interface import ( - ExternalApiSecretsInterface, -) def build_secret_unique_name(user_id: UUID, brain_id: UUID, secret_name: str): return f"{user_id}-{brain_id}-{secret_name}" -class ExternalApiSecrets(ExternalApiSecretsInterface): +class ExternalApiSecrets: def __init__(self): supabase_client = get_supabase_client() self.db = supabase_client diff --git a/backend/core/quivr_core/api/modules/brain/repository/integration_brains.py b/backend/core/quivr_core/api/modules/brain/repository/integration_brains.py index 0192a3e3547e..aa00d32ce572 100644 --- a/backend/core/quivr_core/api/modules/brain/repository/integration_brains.py +++ b/backend/core/quivr_core/api/modules/brain/repository/integration_brains.py @@ -1,4 +1,3 @@ -from abc import ABC, abstractmethod from typing import List from quivr_core.api.models.settings import get_supabase_client @@ -6,23 +5,9 @@ IntegrationDescriptionEntity, IntegrationEntity, ) -from quivr_core.api.modules.brain.repository.interfaces.integration_brains_interface import ( - IntegrationBrainInterface, - IntegrationDescriptionInterface, -) - - -class Integration(ABC): - @abstractmethod - def load(self): - pass - - @abstractmethod - def poll(self): - pass -class IntegrationBrain(IntegrationBrainInterface): +class IntegrationBrain: """This is all the methods to interact with the integration brain. 
Args: @@ -100,7 +85,7 @@ def delete_integration_brain(self, brain_id, user_id): def get_integration_brain_by_type_integration( self, integration_name - ) -> List[IntegrationEntity]: + ) -> List[IntegrationEntity] | None: response = ( self.db.table("integrations_user") .select("*, integrations ()") @@ -113,7 +98,7 @@ def get_integration_brain_by_type_integration( return [IntegrationEntity(**data) for data in response.data] -class IntegrationDescription(IntegrationDescriptionInterface): +class IntegrationDescription: def __init__(self): self.db = get_supabase_client() diff --git a/backend/core/quivr_core/api/modules/brain/repository/interfaces/__init__.py b/backend/core/quivr_core/api/modules/brain/repository/interfaces/__init__.py deleted file mode 100644 index 7e38450bcc4c..000000000000 --- a/backend/core/quivr_core/api/modules/brain/repository/interfaces/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from .api_brain_definitions_interface import ApiBrainDefinitionsInterface -from .brains_interface import BrainsInterface -from .brains_users_interface import BrainsUsersInterface -from .brains_vectors_interface import BrainsVectorsInterface -from .composite_brains_connections_interface import CompositeBrainsConnectionsInterface -from .external_api_secrets_interface import ExternalApiSecretsInterface -from .integration_brains_interface import ( - IntegrationBrainInterface, - IntegrationDescriptionInterface, -) diff --git a/backend/core/quivr_core/api/modules/brain/repository/interfaces/api_brain_definitions_interface.py b/backend/core/quivr_core/api/modules/brain/repository/interfaces/api_brain_definitions_interface.py deleted file mode 100644 index 73fc6591d871..000000000000 --- a/backend/core/quivr_core/api/modules/brain/repository/interfaces/api_brain_definitions_interface.py +++ /dev/null @@ -1,38 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Optional -from uuid import UUID - -from quivr_core.api.modules.brain.dto.inputs import CreateApiBrainDefinition -from quivr_core.api.modules.brain.entity.api_brain_definition_entity import ( - ApiBrainDefinitionEntity, -) - - -class ApiBrainDefinitionsInterface(ABC): - @abstractmethod - def get_api_brain_definition( - self, brain_id: UUID - ) -> Optional[ApiBrainDefinitionEntity]: - pass - - @abstractmethod - def add_api_brain_definition( - self, brain_id: UUID, api_brain_definition: CreateApiBrainDefinition - ) -> Optional[ApiBrainDefinitionEntity]: - pass - - @abstractmethod - def update_api_brain_definition( - self, brain_id: UUID, api_brain_definition: ApiBrainDefinitionEntity - ) -> Optional[ApiBrainDefinitionEntity]: - """ - Get all public brains - """ - pass - - @abstractmethod - def delete_api_brain_definition(self, brain_id: UUID) -> None: - """ - Update the last update time of the brain - """ - pass diff --git a/backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_interface.py b/backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_interface.py deleted file mode 100644 index e2a7920266df..000000000000 --- a/backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_interface.py +++ /dev/null @@ -1,61 +0,0 @@ -from abc import ABC, abstractmethod -from uuid import UUID - -from quivr_core.api.modules.brain.dto.inputs import ( - BrainUpdatableProperties, - CreateBrainProperties, -) -from quivr_core.api.modules.brain.entity.brain_entity import BrainEntity, PublicBrain - - -class BrainsInterface(ABC): - @abstractmethod - def create_brain(self, brain: CreateBrainProperties) -> 
BrainEntity: - """ - Create a brain in the brains table - """ - pass - - @abstractmethod - def get_public_brains(self) -> list[PublicBrain]: - """ - Get all public brains - """ - pass - - @abstractmethod - def get_brain_details(self, brain_id: UUID, user_id: UUID) -> BrainEntity | None: - """ - Get all public brains - """ - pass - - @abstractmethod - def update_brain_last_update_time(self, brain_id: UUID) -> None: - """ - Update the last update time of the brain - """ - pass - - @abstractmethod - def delete_brain(self, brain_id: UUID): - """ - Delete a brain - """ - pass - - @abstractmethod - def update_brain_by_id( - self, brain_id: UUID, brain: BrainUpdatableProperties - ) -> BrainEntity | None: - """ - Update a brain by id - """ - pass - - @abstractmethod - def get_brain_by_id(self, brain_id: UUID) -> BrainEntity | None: - """ - Get a brain by id - """ - pass diff --git a/backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_users_interface.py b/backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_users_interface.py deleted file mode 100644 index fe3288c2055a..000000000000 --- a/backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_users_interface.py +++ /dev/null @@ -1,95 +0,0 @@ -from abc import ABC, abstractmethod -from typing import List -from uuid import UUID - -from quivr_core.api.modules.brain.entity.brain_entity import ( - BrainUser, - MinimalUserBrainEntity, -) - - -class BrainsUsersInterface(ABC): - @abstractmethod - def get_user_brains(self, user_id) -> list[MinimalUserBrainEntity]: - """ - Create a brain in the brains table - """ - pass - - @abstractmethod - def get_brain_for_user(self, user_id, brain_id) -> MinimalUserBrainEntity | None: - """ - Get a brain for a user - """ - pass - - @abstractmethod - def delete_brain_user_by_id( - self, - user_id: UUID, - brain_id: UUID, - ): - """ - Delete a user in a brain - """ - pass - - @abstractmethod - def delete_brain_users(self, brain_id: str): - """ - Delete all users for a brain - """ - pass - - @abstractmethod - def create_brain_user(self, user_id: UUID, brain_id, rights, default_brain: bool): - """ - Create a brain user - """ - pass - - @abstractmethod - def get_user_default_brain_id(self, user_id: UUID) -> UUID | None: - """ - Get the default brain id for a user - """ - pass - - @abstractmethod - def get_brain_users(self, brain_id: UUID) -> List[BrainUser]: - """ - Get all users for a brain - """ - pass - - @abstractmethod - def delete_brain_subscribers(self, brain_id: UUID): - """ - Delete all subscribers for a brain with Viewer rights - """ - pass - - @abstractmethod - def get_brain_subscribers_count(self, brain_id: UUID) -> int: - """ - Get the number of subscribers for a brain - """ - pass - - @abstractmethod - def update_brain_user_default_status( - self, user_id: UUID, brain_id: UUID, default_brain: bool - ): - """ - Update the default brain status for a user - """ - pass - - @abstractmethod - def update_brain_user_rights( - self, brain_id: UUID, user_id: UUID, rights: str - ) -> BrainUser: - """ - Update the rights for a user in a brain - """ - pass diff --git a/backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_vectors_interface.py b/backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_vectors_interface.py deleted file mode 100644 index 35d0e2729a45..000000000000 --- a/backend/core/quivr_core/api/modules/brain/repository/interfaces/brains_vectors_interface.py +++ /dev/null @@ -1,41 +0,0 @@ -from abc import ABC, abstractmethod 
-from typing import List -from uuid import UUID - - -# TODO: Replace BrainsVectors with KnowledgeVectors interface instead -class BrainsVectorsInterface(ABC): - @abstractmethod - def create_brain_vector(self, brain_id, vector_id, file_sha1): - """ - Create a brain vector - """ - pass - - @abstractmethod - def get_vector_ids_from_file_sha1(self, file_sha1: str): - """ - Get vector ids from file sha1 - """ - pass - - @abstractmethod - def get_brain_vector_ids(self, brain_id) -> List[UUID]: - """ - Get brain vector ids - """ - pass - - @abstractmethod - def delete_file_from_brain(self, brain_id, file_name: str): - """ - Delete file from brain - """ - pass - - @abstractmethod - def delete_brain_vector(self, brain_id: str): - """ - Delete brain vector - """ - pass diff --git a/backend/core/quivr_core/api/modules/brain/repository/interfaces/composite_brains_connections_interface.py b/backend/core/quivr_core/api/modules/brain/repository/interfaces/composite_brains_connections_interface.py deleted file mode 100644 index 170c20889ed4..000000000000 --- a/backend/core/quivr_core/api/modules/brain/repository/interfaces/composite_brains_connections_interface.py +++ /dev/null @@ -1,40 +0,0 @@ -from abc import ABC, abstractmethod -from uuid import UUID - -from quivr_core.api.modules.brain.entity.composite_brain_connection_entity import ( - CompositeBrainConnectionEntity, -) - - -class CompositeBrainsConnectionsInterface(ABC): - @abstractmethod - def connect_brain( - self, composite_brain_id: UUID, connected_brain_id: UUID - ) -> CompositeBrainConnectionEntity: - """ - Connect a brain to a composite brain in the composite_brain_connections table - """ - pass - - @abstractmethod - def get_connected_brains(self, composite_brain_id: UUID) -> list[UUID]: - """ - Get all brains connected to a composite brain - """ - pass - - @abstractmethod - def disconnect_brain( - self, composite_brain_id: UUID, connected_brain_id: UUID - ) -> None: - """ - Disconnect a brain from a composite brain - """ - pass - - @abstractmethod - def is_connected_brain(self, brain_id: UUID) -> bool: - """ - Check if a brain is connected to any composite brain - """ - pass diff --git a/backend/core/quivr_core/api/modules/brain/repository/interfaces/external_api_secrets_interface.py b/backend/core/quivr_core/api/modules/brain/repository/interfaces/external_api_secrets_interface.py deleted file mode 100644 index b2f2439d1634..000000000000 --- a/backend/core/quivr_core/api/modules/brain/repository/interfaces/external_api_secrets_interface.py +++ /dev/null @@ -1,29 +0,0 @@ -from abc import ABC, abstractmethod -from uuid import UUID - - -class ExternalApiSecretsInterface(ABC): - @abstractmethod - def create_secret( - self, user_id: UUID, brain_id: UUID, secret_name: str, secret_value - ) -> UUID | None: - """ - Create a new secret for the API Request in given brain - """ - pass - - @abstractmethod - def read_secret( - self, user_id: UUID, brain_id: UUID, secret_name: str - ) -> UUID | None: - """ - Read a secret for the API Request in given brain - """ - pass - - @abstractmethod - def delete_secret(self, user_id: UUID, brain_id: UUID, secret_name: str) -> bool: - """ - Delete a secret from a brain - """ - pass diff --git a/backend/core/quivr_core/api/modules/brain/repository/interfaces/integration_brains_interface.py b/backend/core/quivr_core/api/modules/brain/repository/interfaces/integration_brains_interface.py deleted file mode 100644 index 725368b25b24..000000000000 --- 
a/backend/core/quivr_core/api/modules/brain/repository/interfaces/integration_brains_interface.py +++ /dev/null @@ -1,63 +0,0 @@ -from abc import ABC, abstractmethod -from uuid import UUID - -from quivr_core.api.modules.brain.entity.integration_brain import ( - IntegrationDescriptionEntity, - IntegrationEntity, -) - - -class IntegrationBrainInterface(ABC): - @abstractmethod - def get_integration_brain(self, brain_id: UUID) -> IntegrationEntity: - """Get the integration brain entity - - Args: - brain_id (UUID): ID of the brain - - Returns: - IntegrationEntity: Integration brain entity - """ - pass - - @abstractmethod - def add_integration_brain( - self, brain_id: UUID, integration_brain: IntegrationEntity - ) -> IntegrationEntity: - pass - - @abstractmethod - def update_integration_brain( - self, brain_id: UUID, integration_brain: IntegrationEntity - ) -> IntegrationEntity: - pass - - @abstractmethod - def delete_integration_brain(self, brain_id: UUID) -> None: - pass - - -class IntegrationDescriptionInterface(ABC): - @abstractmethod - def get_integration_description( - self, integration_id: UUID - ) -> IntegrationDescriptionEntity: - """Get the integration description entity - - Args: - integration_id (UUID): ID of the integration - - Returns: - IntegrationEntity: Integration description entity - """ - pass - - @abstractmethod - def get_all_integration_descriptions(self) -> list[IntegrationDescriptionEntity]: - pass - - @abstractmethod - def get_integration_description_by_user_brain_id( - self, brain_id: UUID, user_id: UUID - ) -> IntegrationDescriptionEntity: - pass diff --git a/backend/core/quivr_core/api/modules/brain/service/api_brain_definition_service.py b/backend/core/quivr_core/api/modules/brain/service/api_brain_definition_service.py index 80c3f8f351d1..b1349307ef1b 100644 --- a/backend/core/quivr_core/api/modules/brain/service/api_brain_definition_service.py +++ b/backend/core/quivr_core/api/modules/brain/service/api_brain_definition_service.py @@ -8,13 +8,9 @@ from quivr_core.api.modules.brain.repository.api_brain_definitions import ( ApiBrainDefinitions, ) -from quivr_core.api.modules.brain.repository.interfaces import ( - ApiBrainDefinitionsInterface, -) class ApiBrainDefinitionService: - repository: ApiBrainDefinitionsInterface def __init__(self): self.repository = ApiBrainDefinitions() diff --git a/backend/core/quivr_core/api/modules/brain/service/brain_authorization_service.py b/backend/core/quivr_core/api/modules/brain/service/brain_authorization_service.py deleted file mode 100644 index 72e989c00579..000000000000 --- a/backend/core/quivr_core/api/modules/brain/service/brain_authorization_service.py +++ /dev/null @@ -1,78 +0,0 @@ -from typing import List, Optional, Union -from uuid import UUID - -from fastapi import Depends, HTTPException, status - -from quivr_core.api.middlewares.auth.auth_bearer import get_current_user -from quivr_core.api.modules.brain.entity.brain_entity import RoleEnum -from quivr_core.api.modules.brain.service.brain_service import BrainService -from quivr_core.api.modules.brain.service.brain_user_service import BrainUserService -from quivr_core.api.modules.user.entity.user_identity import UserIdentity - -brain_user_service = BrainUserService() -brain_service = BrainService() - - -def has_brain_authorization( - required_roles: Optional[Union[RoleEnum, List[RoleEnum]]] = RoleEnum.Owner, -): - """ - Decorator to check if the user has the required role(s) for the brain - param: required_roles: The role(s) required to access the brain - return: A wrapper 
function that checks the authorization - """ - - async def wrapper( - brain_id: UUID, current_user: UserIdentity = Depends(get_current_user) - ): - nonlocal required_roles - if isinstance(required_roles, str): - required_roles = [required_roles] # Convert single role to a list - validate_brain_authorization( - brain_id=brain_id, user_id=current_user.id, required_roles=required_roles - ) - - return wrapper - - -def validate_brain_authorization( - brain_id: UUID, - user_id: UUID, - required_roles: Optional[Union[RoleEnum, List[RoleEnum]]] = RoleEnum.Owner, -): - """ - Function to check if the user has the required role(s) for the brain - param: brain_id: The id of the brain - param: user_id: The id of the user - param: required_roles: The role(s) required to access the brain - return: None - """ - - brain = brain_service.get_brain_details(brain_id, user_id) - - if brain and brain.status == "public": - return - - if required_roles is None: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail="Missing required role", - ) - - user_brain = brain_user_service.get_brain_for_user(user_id, brain_id) - if user_brain is None: - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail="You don't have permission for this brain", - ) - - # Convert single role to a list to handle both cases - if isinstance(required_roles, str): - required_roles = [required_roles] - - # Check if the user has at least one of the required roles - if user_brain.rights not in required_roles: - raise HTTPException( - status_code=status.HTTP_403_FORBIDDEN, - detail="You don't have the required role(s) for this brain", - ) diff --git a/backend/core/quivr_core/api/modules/brain/service/brain_service.py b/backend/core/quivr_core/api/modules/brain/service/brain_service.py index 8c2a0a27c5e9..f57b70c23f36 100644 --- a/backend/core/quivr_core/api/modules/brain/service/brain_service.py +++ b/backend/core/quivr_core/api/modules/brain/service/brain_service.py @@ -1,4 +1,4 @@ -from typing import Optional +from typing import Optional, Tuple from uuid import UUID from fastapi import HTTPException @@ -22,6 +22,9 @@ IntegrationBrain, IntegrationDescription, ) +from quivr_core.api.modules.brain.repository.external_api_secrets import ( + ExternalApiSecrets, +) from quivr_core.api.modules.brain.service.api_brain_definition_service import ( ApiBrainDefinitionService, ) @@ -36,12 +39,6 @@ class BrainService: - # brain_repository: BrainsInterface - # brain_user_repository: BrainsUsersInterface - # brain_vector_repository: BrainsVectorsInterface - # external_api_secrets_repository: ExternalApiSecretsInterface - # integration_brains_repository: IntegrationBrainInterface - # integration_description_repository: IntegrationDescriptionInterface def __init__(self): self.brain_repository: Brains = Brains() @@ -49,6 +46,7 @@ def __init__(self): self.brain_vector = BrainsVectors() self.integration_brains_repository = IntegrationBrain() self.integration_description_repository = IntegrationDescription() + self.external_api_secrets_repository = ExternalApiSecrets() def get_brain_by_id(self, brain_id: UUID): return self.brain_repository.get_brain_by_id(brain_id) @@ -64,7 +62,7 @@ def find_brain_from_question( chat_id: UUID, history, vector_store: CustomSupabaseVectorStore, - ) -> (Optional[BrainEntity], dict[str, str]): + ) -> Tuple[Optional[BrainEntity], dict[str, str]]: """Find the brain to use for a question. 
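A note on the Tuple change in the hunk above: the old annotation -> (Optional[BrainEntity], dict[str, str]) is accepted at runtime, because annotations are arbitrary expressions, but it builds a tuple object rather than a type, and type checkers reject it. A minimal illustration with generic names (not from this codebase):

from typing import Optional, Tuple

# Rejected by type checkers such as mypy: the annotation below is a
# 2-tuple literal, not a type, even though Python stores it without error.
# def broken() -> (Optional[int], dict[str, str]): ...

# Correct spelling with typing.Tuple, as the patch does:
def fixed() -> Tuple[Optional[int], dict[str, str]]:
    return None, {}

# On Python 3.9+ the builtin generic form works as well:
def also_fixed() -> tuple[Optional[int], dict[str, str]]:
    return 42, {"k": "v"}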
Args: @@ -154,13 +152,14 @@ def create_brain_api( secrets_values = brain.brain_secrets_values - for secret_name in secrets_values: - self.external_api_secrets_repository.create_secret( - user_id=user_id, - brain_id=created_brain.brain_id, - secret_name=secret_name, - secret_value=secrets_values[secret_name], - ) + if secrets_values: + for secret_name in secrets_values: + self.external_api_secrets_repository.create_secret( + user_id=user_id, + brain_id=created_brain.brain_id, + secret_name=secret_name, + secret_value=secrets_values[secret_name], + ) return created_brain diff --git a/backend/core/quivr_core/api/modules/brain/service/brain_subscription/__init__.py b/backend/core/quivr_core/api/modules/brain/service/brain_subscription/__init__.py deleted file mode 100644 index efe9797fc7fa..000000000000 --- a/backend/core/quivr_core/api/modules/brain/service/brain_subscription/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .resend_invitation_email import resend_invitation_email -from .subscription_invitation_service import SubscriptionInvitationService diff --git a/backend/core/quivr_core/api/modules/brain/service/brain_subscription/resend_invitation_email.py b/backend/core/quivr_core/api/modules/brain/service/brain_subscription/resend_invitation_email.py deleted file mode 100644 index ffc0f17297a3..000000000000 --- a/backend/core/quivr_core/api/modules/brain/service/brain_subscription/resend_invitation_email.py +++ /dev/null @@ -1,57 +0,0 @@ -from uuid import UUID - -from quivr_core.api.logger import get_logger -from quivr_core.api.models.brains_subscription_invitations import BrainSubscription -from quivr_core.api.models.settings import BrainSettings -from quivr_core.api.modules.brain.service.brain_service import BrainService -from quivr_core.api.packages.emails.send_email import send_email - -logger = get_logger(__name__) - -brain_service = BrainService() - - -def get_brain_url(origin: str, brain_id: UUID) -> str: - """Generates the brain URL based on the brain_id.""" - - return f"{origin}/invitation/{brain_id}" - - -def resend_invitation_email( - brain_subscription: BrainSubscription, - inviter_email: str, - user_id: UUID, - origin: str = "https://chat.quivr.app", -): - brains_settings = BrainSettings() # pyright: ignore reportPrivateUsage=none - - brain_url = get_brain_url(origin, brain_subscription.brain_id) - - invitation_brain = brain_service.get_brain_details( - brain_subscription.brain_id, user_id - ) - if invitation_brain is None: - raise Exception("Brain not found") - brain_name = invitation_brain.name - - html_body = f""" -

<p>Brain {brain_name} has been shared with you by {inviter_email}.</p>
- <br />
- <p>Click <a href='{brain_url}'>here</a> to access your brain.</p>
- """ - - try: - r = send_email( - { - "from": brains_settings.resend_email_address, - "to": [brain_subscription.email], - "subject": "Quivr - Brain Shared With You", - "reply_to": "no-reply@quivr.app", - "html": html_body, - } - ) - logger.info("Resend response", r) - except Exception as e: - logger.error(f"Error sending email: {e}") - return - - return r diff --git a/backend/core/quivr_core/api/modules/brain/service/brain_subscription/subscription_invitation_service.py b/backend/core/quivr_core/api/modules/brain/service/brain_subscription/subscription_invitation_service.py deleted file mode 100644 index 84448ee379f7..000000000000 --- a/backend/core/quivr_core/api/modules/brain/service/brain_subscription/subscription_invitation_service.py +++ /dev/null @@ -1,106 +0,0 @@ -from quivr_core.api.logger import get_logger -from quivr_core.api.models.brains_subscription_invitations import BrainSubscription -from quivr_core.api.models.settings import get_supabase_client -from quivr_core.api.modules.brain.service.brain_user_service import BrainUserService -from quivr_core.api.modules.user.service.user_service import UserService - -logger = get_logger(__name__) - - -brain_user_service = BrainUserService() -user_service = UserService() - - -class SubscriptionInvitationService: - def __init__(self): - self.supabase_client = get_supabase_client() - - def create_subscription_invitation(self, brain_subscription: BrainSubscription): - logger.info("Creating subscription invitation") - response = ( - self.supabase_client.table("brain_subscription_invitations") - .insert( - { - "brain_id": str(brain_subscription.brain_id), - "email": brain_subscription.email, - "rights": brain_subscription.rights, - } - ) - .execute() - ) - return response.data - - def update_subscription_invitation(self, brain_subscription: BrainSubscription): - logger.info("Updating subscription invitation") - response = ( - self.supabase_client.table("brain_subscription_invitations") - .update({"rights": brain_subscription.rights}) - .eq("brain_id", str(brain_subscription.brain_id)) - .eq("email", brain_subscription.email) - .execute() - ) - return response.data - - def create_or_update_subscription_invitation( - self, - brain_subscription: BrainSubscription, - ) -> bool: - """ - Creates a subscription invitation if it does not exist, otherwise updates it. - Returns True if the invitation was created or updated and False if user already has access. 
- """ - response = ( - self.supabase_client.table("brain_subscription_invitations") - .select("*") - .eq("brain_id", str(brain_subscription.brain_id)) - .eq("email", brain_subscription.email) - .execute() - ) - - if response.data: - self.update_subscription_invitation(brain_subscription) - return True - else: - user_id = user_service.get_user_id_by_email(brain_subscription.email) - brain_user = None - - if user_id is not None: - brain_id = brain_subscription.brain_id - brain_user = brain_user_service.get_brain_for_user(user_id, brain_id) - - if brain_user is None: - self.create_subscription_invitation(brain_subscription) - return True - - return False - - def fetch_invitation(self, subscription: BrainSubscription): - logger.info("Fetching subscription invitation") - response = ( - self.supabase_client.table("brain_subscription_invitations") - .select("*") - .eq("brain_id", str(subscription.brain_id)) - .eq("email", subscription.email) - .execute() - ) - if response.data: - return response.data[0] # return the first matching invitation - else: - return None - - def remove_invitation(self, subscription: BrainSubscription): - logger.info( - f"Removing subscription invitation for email {subscription.email} and brain {subscription.brain_id}" - ) - response = ( - self.supabase_client.table("brain_subscription_invitations") - .delete() - .eq("brain_id", str(subscription.brain_id)) - .eq("email", subscription.email) - .execute() - ) - logger.info( - f"Removed subscription invitation for email {subscription.email} and brain {subscription.brain_id}" - ) - logger.info(response) - return response.data diff --git a/backend/core/quivr_core/api/modules/brain/service/brain_user_service.py b/backend/core/quivr_core/api/modules/brain/service/brain_user_service.py index de07051bd34f..86c76917ecfd 100644 --- a/backend/core/quivr_core/api/modules/brain/service/brain_user_service.py +++ b/backend/core/quivr_core/api/modules/brain/service/brain_user_service.py @@ -2,7 +2,6 @@ from uuid import UUID from fastapi import HTTPException - from quivr_core.api.logger import get_logger from quivr_core.api.modules.brain.entity.brain_entity import ( BrainEntity, @@ -16,15 +15,6 @@ from quivr_core.api.modules.brain.repository.external_api_secrets import ( ExternalApiSecrets, ) -from quivr_core.api.modules.brain.repository.interfaces.brains_interface import ( - BrainsInterface, -) -from quivr_core.api.modules.brain.repository.interfaces.brains_users_interface import ( - BrainsUsersInterface, -) -from quivr_core.api.modules.brain.repository.interfaces.external_api_secrets_interface import ( - ExternalApiSecretsInterface, -) from quivr_core.api.modules.brain.service.api_brain_definition_service import ( ApiBrainDefinitionService, ) @@ -37,9 +27,6 @@ class BrainUserService: - brain_repository: BrainsInterface - brain_user_repository: BrainsUsersInterface - external_api_secrets_repository: ExternalApiSecretsInterface def __init__(self): self.brain_repository = Brains() diff --git a/backend/core/quivr_core/api/modules/brain/service/call_brain_api.py b/backend/core/quivr_core/api/modules/brain/service/call_brain_api.py index defddaf92fc1..b497a67e3431 100644 --- a/backend/core/quivr_core/api/modules/brain/service/call_brain_api.py +++ b/backend/core/quivr_core/api/modules/brain/service/call_brain_api.py @@ -1,13 +1,9 @@ from uuid import UUID import requests - -from quivr_core.api.logger import get_logger - -logger = get_logger(__name__) - from fastapi import HTTPException +from quivr_core.api.logger import get_logger from 
quivr_core.api.modules.brain.entity.api_brain_definition_entity import ( ApiBrainDefinitionSchema, ) @@ -16,13 +12,14 @@ ) from quivr_core.api.modules.brain.service.brain_service import BrainService +logger = get_logger(__name__) brain_service = BrainService() api_brain_definition_service = ApiBrainDefinitionService() def get_api_call_response_as_text( method, api_url, params, search_params, secrets -) -> str: +) -> str | None: headers = {} api_url_with_search_params = api_url @@ -84,7 +81,7 @@ def extract_api_brain_definition_values_from_llm_output( return params_values -def call_brain_api(brain_id: UUID, user_id: UUID, arguments: dict) -> str: +def call_brain_api(brain_id: UUID, user_id: UUID, arguments: dict) -> str | None: brain_definition = api_brain_definition_service.get_api_brain_definition(brain_id) if brain_definition is None: diff --git a/backend/core/quivr_core/api/modules/brain/service/get_question_context_from_brain.py b/backend/core/quivr_core/api/modules/brain/service/get_question_context_from_brain.py index 01c04148eac3..f1abfc032cc1 100644 --- a/backend/core/quivr_core/api/modules/brain/service/get_question_context_from_brain.py +++ b/backend/core/quivr_core/api/modules/brain/service/get_question_context_from_brain.py @@ -22,7 +22,7 @@ class DocumentAnswer: file_similarity: float = 0.0 -def get_question_context_from_brain(brain_id: UUID, question: str) -> str: +def get_question_context_from_brain(brain_id: UUID, question: str) -> list[str]: """Finds the best brain to answer the question based on the question's meaning. Args: @@ -40,7 +40,7 @@ def get_question_context_from_brain(brain_id: UUID, question: str) -> str: supabase_client, embeddings, table_name="vectors", - brain_id=str(brain_id), + brain_id=brain_id, number_docs=20, ) documents = vector_store.similarity_search(question, k=20, threshold=0.8) diff --git a/backend/core/quivr_core/api/modules/brain/tests/test_brains_interface.py b/backend/core/quivr_core/api/modules/brain/tests/test_brains_interface.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/backend/core/quivr_core/api/modules/knowledge/controller/knowledge_routes.py b/backend/core/quivr_core/api/modules/knowledge/controller/knowledge_routes.py index ea04d1f9e50d..3c3187b17a3e 100644 --- a/backend/core/quivr_core/api/modules/knowledge/controller/knowledge_routes.py +++ b/backend/core/quivr_core/api/modules/knowledge/controller/knowledge_routes.py @@ -3,13 +3,8 @@ from fastapi import APIRouter, Depends, HTTPException, Query from quivr_core.api.logger import get_logger -from quivr_core.api.middlewares.auth import AuthBearer, get_current_user -from quivr_core.api.modules.brain.entity.brain_entity import RoleEnum -from quivr_core.api.modules.brain.service.brain_authorization_service import ( - has_brain_authorization, - validate_brain_authorization, -) from quivr_core.api.modules.brain.service.brain_vector_service import BrainVectorService +from quivr_core.api.modules.dependencies import get_current_user from quivr_core.api.modules.knowledge.service.knowledge_service import KnowledgeService from quivr_core.api.modules.upload.service.generate_file_signed_url import ( generate_file_signed_url, @@ -22,9 +17,7 @@ knowledge_service = KnowledgeService() -@knowledge_router.get( - "/knowledge", dependencies=[Depends(AuthBearer())], tags=["Knowledge"] -) +@knowledge_router.get("/knowledge", tags=["Knowledge"]) async def list_knowledge_in_brain_endpoint( brain_id: UUID = Query(..., description="The ID of the brain"), current_user: UserIdentity = 
Depends(get_current_user), @@ -33,8 +26,6 @@ async def list_knowledge_in_brain_endpoint( Retrieve and list all the knowledge in a brain. """ - validate_brain_authorization(brain_id=brain_id, user_id=current_user.id) - knowledges = knowledge_service.get_all_knowledge(brain_id) return {"knowledges": knowledges} @@ -42,10 +33,6 @@ async def list_knowledge_in_brain_endpoint( @knowledge_router.delete( "/knowledge/{knowledge_id}", - dependencies=[ - Depends(AuthBearer()), - Depends(has_brain_authorization(RoleEnum.Owner)), - ], tags=["Knowledge"], ) async def delete_endpoint( @@ -74,7 +61,6 @@ async def delete_endpoint( @knowledge_router.get( "/knowledge/{knowledge_id}/signed_download_url", - dependencies=[Depends(AuthBearer())], tags=["Knowledge"], ) async def generate_signed_url_endpoint( @@ -87,8 +73,6 @@ async def generate_signed_url_endpoint( knowledge = knowledge_service.get_knowledge(knowledge_id) - validate_brain_authorization(brain_id=knowledge.brain_id, user_id=current_user.id) - if knowledge.file_name == None: raise HTTPException( status_code=404, From c2b2ad7a5651881fc0b161446e1267f2dc8b813e Mon Sep 17 00:00:00 2001 From: aminediro Date: Fri, 28 Jun 2024 14:54:14 +0200 Subject: [PATCH 08/20] storage interface + local storage --- backend/core/quivr_core/storage/__init__.py | 0 .../core/quivr_core/storage/local_storage.py | 60 +++++++++++++++++++ .../core/quivr_core/storage/storage_base.py | 13 ++++ 3 files changed, 73 insertions(+) create mode 100644 backend/core/quivr_core/storage/__init__.py create mode 100644 backend/core/quivr_core/storage/local_storage.py create mode 100644 backend/core/quivr_core/storage/storage_base.py diff --git a/backend/core/quivr_core/storage/__init__.py b/backend/core/quivr_core/storage/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/storage/local_storage.py b/backend/core/quivr_core/storage/local_storage.py new file mode 100644 index 000000000000..3e1a642c1965 --- /dev/null +++ b/backend/core/quivr_core/storage/local_storage.py @@ -0,0 +1,60 @@ +import mmap +import os +from io import BytesIO +from pathlib import Path +from typing import BinaryIO +from uuid import UUID, uuid4 + +import aiofiles + +from quivr_core.storage.storage_base import StorageBase + + +class QuivrFile: + __slots__ = ["fd", "filename", "brain_id", "file_size"] + + @classmethod + def from_path(cls, path: str): + fd = os.open(path, os.O_RDONLY) + file_size = os.stat(path).st_size + mmapped_file = mmap.mmap(fd, file_size, access=mmap.ACCESS_READ) + # TODO: parse file_name and brain_id + file_name = "" + return cls( + fd=BytesIO(mmapped_file), + brain_id=uuid4(), + filename=file_name, + file_size=file_size, + ) + + def __init__( + self, fd: BinaryIO, filename: str, brain_id: UUID, file_size: int | None = None + ) -> None: + self.fd = fd + self.brain_id = brain_id + self.filename = filename + self.file_size = file_size + + def local_filepath(self) -> str: + return os.path.join(str(self.brain_id), self.filename) + + +class LocalStorage(StorageBase): + def __init__(self, dir_path: Path | None = None): + if dir_path is None: + self.dir_path = os.getenv("QUIVR_LOCAL_STORAGE", "~/.cache/quivr/files") + else: + self.dir_path = dir_path + os.makedirs(self.dir_path, exist_ok=True) + + async def upload_file(self, file: QuivrFile, exists_ok: bool = False): + path = os.path.join(self.dir_path, file.local_filepath()) + if os.path.exists(path) and not exists_ok: + raise FileExistsError("file already exists") + + async with aiofiles.open(path, "wb") 
as f: + await f.write(file.fd.read()) + + # TODO: + async def remove_file(self, file_name: str): + pass diff --git a/backend/core/quivr_core/storage/storage_base.py b/backend/core/quivr_core/storage/storage_base.py new file mode 100644 index 000000000000..265980bc4d57 --- /dev/null +++ b/backend/core/quivr_core/storage/storage_base.py @@ -0,0 +1,13 @@ +from abc import ABC, abstractmethod + +from quivr_core.storage.local_storage import QuivrFile + + +class StorageBase(ABC): + @abstractmethod + async def upload_file(self, file: QuivrFile, exists_ok: bool = False): + raise Exception("Unimplemented upload_file method") + + @abstractmethod + async def remove_file(self, file_name: str): + raise Exception("Unimplemented remove_file method") From 3331b5423f95c541b82d4c049f5e8f5f2f7b8d5e Mon Sep 17 00:00:00 2001 From: aminediro Date: Fri, 28 Jun 2024 17:49:07 +0200 Subject: [PATCH 09/20] core brain cleanup --- .../api/modules/brain/api_brain_qa.py | 500 --------------- .../api/modules/brain/composite_brain_qa.py | 593 ------------------ .../modules/brain/controller/brain_routes.py | 60 -- .../modules/brain/entity/integration_brain.py | 2 +- .../api/modules/brain/qa_headless.py | 270 -------- .../api/modules/brain/qa_interface.py | 58 -- .../brain/repository/api_brain_definitions.py | 57 -- .../brain/repository/external_api_secrets.py | 57 -- .../brain/repository/integration_brains.py | 13 +- .../service/api_brain_definition_service.py | 36 -- .../modules/brain/service/brain_service.py | 150 +---- .../brain/service/brain_user_service.py | 26 +- .../brain/service/brain_vector_service.py | 10 +- .../modules/brain/service/call_brain_api.py | 115 ---- ...get_api_brain_definition_as_json_schema.py | 65 -- .../get_question_context_from_brain.py | 5 +- .../service/integration_brain_service.py | 8 +- .../{service => tests}/test_brain_service.py | 0 18 files changed, 17 insertions(+), 2008 deletions(-) delete mode 100644 backend/core/quivr_core/api/modules/brain/api_brain_qa.py delete mode 100644 backend/core/quivr_core/api/modules/brain/composite_brain_qa.py delete mode 100644 backend/core/quivr_core/api/modules/brain/qa_headless.py delete mode 100644 backend/core/quivr_core/api/modules/brain/qa_interface.py delete mode 100644 backend/core/quivr_core/api/modules/brain/repository/api_brain_definitions.py delete mode 100644 backend/core/quivr_core/api/modules/brain/repository/external_api_secrets.py delete mode 100644 backend/core/quivr_core/api/modules/brain/service/api_brain_definition_service.py delete mode 100644 backend/core/quivr_core/api/modules/brain/service/call_brain_api.py delete mode 100644 backend/core/quivr_core/api/modules/brain/service/get_api_brain_definition_as_json_schema.py rename backend/core/quivr_core/api/modules/brain/{service => tests}/test_brain_service.py (100%) diff --git a/backend/core/quivr_core/api/modules/brain/api_brain_qa.py b/backend/core/quivr_core/api/modules/brain/api_brain_qa.py deleted file mode 100644 index 87ab8430fa75..000000000000 --- a/backend/core/quivr_core/api/modules/brain/api_brain_qa.py +++ /dev/null @@ -1,500 +0,0 @@ -import json -from typing import Optional -from uuid import UUID - -import jq -import requests -from fastapi import HTTPException -from litellm import completion - -from quivr_core.api.logger import get_logger -from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA -from quivr_core.api.modules.brain.qa_interface import QAInterface -from quivr_core.api.modules.brain.service.brain_service import BrainService -from 
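Before the brain cleanup below, a quick usage sketch of the new storage layer from the previous patch. Two caveats are visible in the code as written: upload_file only creates the storage root, not the per-brain subfolder that local_filepath points into, so the caller has to create it; and the ~ in the QUIVR_LOCAL_STORAGE default is never expanded (os.makedirs would create a literal ~ directory), so passing an explicit dir_path is safer. The demo path and file below are made up:

import asyncio
from io import BytesIO
from pathlib import Path
from uuid import uuid4

from quivr_core.storage.local_storage import LocalStorage, QuivrFile


async def main() -> None:
    root = Path("/tmp/quivr-files")  # hypothetical demo directory
    storage = LocalStorage(dir_path=root)

    qfile = QuivrFile(
        fd=BytesIO(b"hello quivr"),
        filename="notes.txt",
        brain_id=uuid4(),
        file_size=11,
    )

    # upload_file writes to <dir_path>/<brain_id>/<filename> but only
    # makedirs <dir_path> itself, so create the per-brain folder first.
    (root / str(qfile.brain_id)).mkdir(parents=True, exist_ok=True)
    await storage.upload_file(qfile, exists_ok=True)


asyncio.run(main())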
quivr_core.api.modules.brain.service.call_brain_api import call_brain_api -from quivr_core.api.modules.brain.service.get_api_brain_definition_as_json_schema import ( - get_api_brain_definition_as_json_schema, -) -from quivr_core.api.modules.chat.dto.chats import ChatQuestion -from quivr_core.api.modules.chat.dto.inputs import CreateChatHistory -from quivr_core.api.modules.chat.dto.outputs import GetChatHistoryOutput -from quivr_core.api.modules.chat.service.chat_service import ChatService -from quivr_core.api.modules.dependencies import get_service - -brain_service = BrainService() -chat_service = get_service(ChatService)() - -logger = get_logger(__name__) - - -class UUIDEncoder(json.JSONEncoder): - def default(self, obj): - if isinstance(obj, UUID): - # if the object is uuid, we simply return the value of uuid - return str(obj) - return super().default(obj) - - -class APIBrainQA(KnowledgeBrainQA, QAInterface): - user_id: UUID - raw: bool = False - jq_instructions: Optional[str] = None - - def __init__( - self, - model: str, - brain_id: str, - chat_id: str, - streaming: bool = False, - prompt_id: Optional[UUID] = None, - raw: bool = False, - jq_instructions: Optional[str] = None, - **kwargs, - ): - user_id = kwargs.get("user_id") - if not user_id: - raise HTTPException(status_code=400, detail="Cannot find user id") - - super().__init__( - model=model, - brain_id=brain_id, - chat_id=chat_id, - streaming=streaming, - prompt_id=prompt_id, - **kwargs, - ) - self.user_id = user_id - self.raw = raw - self.jq_instructions = jq_instructions - - def get_api_call_response_as_text( - self, method, api_url, params, search_params, secrets - ) -> str: - headers = {} - - api_url_with_search_params = api_url - if search_params: - api_url_with_search_params += "?" - for search_param in search_params: - api_url_with_search_params += ( - f"{search_param}={search_params[search_param]}&" - ) - - for secret in secrets: - headers[secret] = secrets[secret] - - try: - if method in ["GET", "DELETE"]: - response = requests.request( - method, - url=api_url_with_search_params, - params=params or None, - headers=headers or None, - ) - elif method in ["POST", "PUT", "PATCH"]: - response = requests.request( - method, - url=api_url_with_search_params, - json=params or None, - headers=headers or None, - ) - else: - raise ValueError(f"Invalid method: {method}") - - return response.text - - except Exception as e: - logger.error(f"Error calling API: {e}") - return None - - def log_steps(self, message: str, type: str): - if "api" not in self.metadata: - self.metadata["api"] = {} - if "steps" not in self.metadata["api"]: - self.metadata["api"]["steps"] = [] - self.metadata["api"]["steps"].append( - { - "number": len(self.metadata["api"]["steps"]), - "type": type, - "message": message, - } - ) - - async def make_completion( - self, - messages, - functions, - brain_id: UUID, - recursive_count=0, - should_log_steps=True, - ) -> str | None: - if recursive_count > 5: - self.log_steps( - "The assistant is having issues and took more than 5 calls to the API. 
Please try again later or an other instruction.", - "error", - ) - return - - if "api" not in self.metadata: - self.metadata["api"] = {} - if "raw" not in self.metadata["api"]: - self.metadata["api"]["raw_enabled"] = self.raw - - response = completion( - model=self.model, - temperature=self.temperature, - max_tokens=self.max_tokens, - messages=messages, - functions=functions, - stream=True, - function_call="auto", - ) - - function_call = { - "name": None, - "arguments": "", - } - for chunk in response: - finish_reason = chunk.choices[0].finish_reason - if finish_reason == "stop": - self.log_steps("Quivr has finished", "info") - break - if ( - "function_call" in chunk.choices[0].delta - and chunk.choices[0].delta["function_call"] - ): - if chunk.choices[0].delta["function_call"].name: - function_call["name"] = chunk.choices[0].delta["function_call"].name - if chunk.choices[0].delta["function_call"].arguments: - function_call["arguments"] += ( - chunk.choices[0].delta["function_call"].arguments - ) - - elif finish_reason == "function_call": - try: - arguments = json.loads(function_call["arguments"]) - - except Exception: - self.log_steps(f"Issues with {arguments}", "error") - arguments = {} - - self.log_steps(f"Calling {brain_id} with arguments {arguments}", "info") - - try: - api_call_response = call_brain_api( - brain_id=brain_id, - user_id=self.user_id, - arguments=arguments, - ) - except Exception as e: - logger.info(f"Error while calling API: {e}") - api_call_response = f"Error while calling API: {e}" - function_name = function_call["name"] - self.log_steps("Quivr has called the API", "info") - messages.append( - { - "role": "function", - "name": function_call["name"], - "content": f"The function {function_name} was called and gave The following answer:(data from function) {api_call_response} (end of data from function). Don't call this function again unless there was an error or extremely necessary and asked specifically by the user. If an error, display it to the user in raw.", - } - ) - - self.metadata["api"]["raw_response"] = json.loads(api_call_response) - if self.raw: - # Yield the raw response in a format that can then be catched by the generate_stream function - response_to_yield = f"````raw_response: {api_call_response}````" - - yield response_to_yield - return - - async for value in self.make_completion( - messages=messages, - functions=functions, - brain_id=brain_id, - recursive_count=recursive_count + 1, - should_log_steps=should_log_steps, - ): - yield value - - else: - if ( - hasattr(chunk.choices[0], "delta") - and chunk.choices[0].delta - and hasattr(chunk.choices[0].delta, "content") - ): - content = chunk.choices[0].delta.content - yield content - else: # pragma: no cover - yield "**...**" - break - - async def generate_stream( - self, - chat_id: UUID, - question: ChatQuestion, - save_answer: bool = True, - should_log_steps: Optional[bool] = True, - ): - brain = brain_service.get_brain_by_id(self.brain_id) - - if not brain: - raise HTTPException(status_code=404, detail="Brain not found") - - prompt_content = "You are a helpful assistant that can access functions to help answer questions. If there are information missing in the question, you can ask follow up questions to get more information to the user. Once all the information is available, you can call the function to get the answer." 
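One utility from the top of this deleted module worth keeping in mind is UUIDEncoder, which lets json.dumps handle UUID values through the standard cls= hook. A self-contained example (the payload is made up):

import json
from uuid import UUID, uuid4


class UUIDEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, UUID):
            # json has no native UUID support; fall back to the string form
            return str(obj)
        return super().default(obj)


payload = {"brain_id": uuid4(), "question": "hello"}
print(json.dumps(payload, cls=UUIDEncoder))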
- - if self.prompt_to_use: - prompt_content += self.prompt_to_use.content - - messages = [{"role": "system", "content": prompt_content}] - - history = chat_service.get_chat_history(self.chat_id) - - for message in history: - formatted_message = [ - {"role": "user", "content": message.user_message}, - {"role": "assistant", "content": message.assistant}, - ] - messages.extend(formatted_message) - - messages.append({"role": "user", "content": question.question}) - - if save_answer: - streamed_chat_history = chat_service.update_chat_history( - CreateChatHistory( - **{ - "chat_id": chat_id, - "user_message": question.question, - "assistant": "", - "brain_id": self.brain_id, - "prompt_id": self.prompt_to_use_id, - } - ) - ) - streamed_chat_history = GetChatHistoryOutput( - **{ - "chat_id": str(chat_id), - "message_id": streamed_chat_history.message_id, - "message_time": streamed_chat_history.message_time, - "user_message": question.question, - "assistant": "", - "prompt_title": ( - self.prompt_to_use.title if self.prompt_to_use else None - ), - "brain_name": brain.name if brain else None, - "brain_id": str(self.brain_id), - "metadata": self.metadata, - } - ) - else: - streamed_chat_history = GetChatHistoryOutput( - **{ - "chat_id": str(chat_id), - "message_id": None, - "message_time": None, - "user_message": question.question, - "assistant": "", - "prompt_title": ( - self.prompt_to_use.title if self.prompt_to_use else None - ), - "brain_name": brain.name if brain else None, - "brain_id": str(self.brain_id), - "metadata": self.metadata, - } - ) - response_tokens = [] - async for value in self.make_completion( - messages=messages, - functions=[get_api_brain_definition_as_json_schema(brain)], - brain_id=self.brain_id, - should_log_steps=should_log_steps, - ): - # Look if the value is a raw response - if value.startswith("````raw_response:"): - raw_value_cleaned = value.replace("````raw_response: ", "").replace( - "````", "" - ) - logger.info(f"Raw response: {raw_value_cleaned}") - if self.jq_instructions: - json_raw_value_cleaned = json.loads(raw_value_cleaned) - raw_value_cleaned = ( - jq.compile(self.jq_instructions) - .input_value(json_raw_value_cleaned) - .first() - ) - streamed_chat_history.assistant = raw_value_cleaned - response_tokens.append(raw_value_cleaned) - yield f"data: {json.dumps(streamed_chat_history.dict())}" - else: - streamed_chat_history.assistant = value - response_tokens.append(value) - yield f"data: {json.dumps(streamed_chat_history.dict())}" - - if save_answer: - chat_service.update_message_by_id( - message_id=str(streamed_chat_history.message_id), - user_message=question.question, - assistant="".join(str(token) for token in response_tokens), - metadata=self.metadata, - ) - - def make_completion_without_streaming( - self, - messages, - functions, - brain_id: UUID, - recursive_count=0, - should_log_steps=False, - ): - if recursive_count > 5: - print( - "The assistant is having issues and took more than 5 calls to the API. Please try again later or an other instruction." 
- ) - return - - if should_log_steps: - print("🧠🧠") - - response = completion( - model=self.model, - temperature=self.temperature, - max_tokens=self.max_tokens, - messages=messages, - functions=functions, - stream=False, - function_call="auto", - ) - - response_message = response.choices[0].message - finish_reason = response.choices[0].finish_reason - - if finish_reason == "function_call": - function_call = response_message.function_call - try: - arguments = json.loads(function_call.arguments) - - except Exception: - arguments = {} - - if should_log_steps: - self.log_steps(f"Calling {brain_id} with arguments {arguments}", "info") - - try: - api_call_response = call_brain_api( - brain_id=brain_id, - user_id=self.user_id, - arguments=arguments, - ) - except Exception as e: - raise HTTPException( - status_code=400, - detail=f"Error while calling API: {e}", - ) - - function_name = function_call.name - messages.append( - { - "role": "function", - "name": function_call.name, - "content": f"The function {function_name} was called and gave The following answer:(data from function) {api_call_response} (end of data from function). Don't call this function again unless there was an error or extremely necessary and asked specifically by the user.", - } - ) - - return self.make_completion_without_streaming( - messages=messages, - functions=functions, - brain_id=brain_id, - recursive_count=recursive_count + 1, - should_log_steps=should_log_steps, - ) - - if finish_reason == "stop": - return response_message - - else: - print("Never ending completion") - - def generate_answer( - self, - chat_id: UUID, - question: ChatQuestion, - save_answer: bool = True, - raw: bool = True, - ): - if not self.brain_id: - raise HTTPException( - status_code=400, detail="No brain id provided in the question" - ) - - brain = brain_service.get_brain_by_id(self.brain_id) - - if not brain: - raise HTTPException(status_code=404, detail="Brain not found") - - prompt_content = "You are a helpful assistant that can access functions to help answer questions. If there are information missing in the question, you can ask follow up questions to get more information to the user. Once all the information is available, you can call the function to get the answer." 
- - if self.prompt_to_use: - prompt_content += self.prompt_to_use.content - - messages = [{"role": "system", "content": prompt_content}] - - history = chat_service.get_chat_history(self.chat_id) - - for message in history: - formatted_message = [ - {"role": "user", "content": message.user_message}, - {"role": "assistant", "content": message.assistant}, - ] - messages.extend(formatted_message) - - messages.append({"role": "user", "content": question.question}) - - response = self.make_completion_without_streaming( - messages=messages, - functions=[get_api_brain_definition_as_json_schema(brain)], - brain_id=self.brain_id, - should_log_steps=False, - raw=raw, - ) - - answer = response.content - if save_answer: - new_chat = chat_service.update_chat_history( - CreateChatHistory( - **{ - "chat_id": chat_id, - "user_message": question.question, - "assistant": answer, - "brain_id": self.brain_id, - "prompt_id": self.prompt_to_use_id, - } - ) - ) - - return GetChatHistoryOutput( - **{ - "chat_id": chat_id, - "user_message": question.question, - "assistant": answer, - "message_time": new_chat.message_time, - "prompt_title": ( - self.prompt_to_use.title if self.prompt_to_use else None - ), - "brain_name": brain.name if brain else None, - "message_id": new_chat.message_id, - "metadata": self.metadata, - "brain_id": str(self.brain_id), - } - ) - return GetChatHistoryOutput( - **{ - "chat_id": chat_id, - "user_message": question.question, - "assistant": answer, - "message_time": "123", - "prompt_title": None, - "brain_name": brain.name, - "message_id": None, - "metadata": self.metadata, - "brain_id": str(self.brain_id), - } - ) diff --git a/backend/core/quivr_core/api/modules/brain/composite_brain_qa.py b/backend/core/quivr_core/api/modules/brain/composite_brain_qa.py deleted file mode 100644 index 25e460a5617a..000000000000 --- a/backend/core/quivr_core/api/modules/brain/composite_brain_qa.py +++ /dev/null @@ -1,593 +0,0 @@ -import json -from typing import Optional -from uuid import UUID - -from fastapi import HTTPException -from litellm import completion - -from quivr_core.api.logger import get_logger -from quivr_core.api.modules.brain.api_brain_qa import APIBrainQA -from quivr_core.api.modules.brain.entity.brain_entity import BrainEntity, BrainType -from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA -from quivr_core.api.modules.brain.qa_headless import HeadlessQA -from quivr_core.api.modules.brain.service.brain_service import BrainService -from quivr_core.api.modules.chat.dto.chats import ChatQuestion -from quivr_core.api.modules.chat.dto.inputs import CreateChatHistory -from quivr_core.api.modules.chat.dto.outputs import ( - BrainCompletionOutput, - CompletionMessage, - CompletionResponse, - GetChatHistoryOutput, -) -from quivr_core.api.modules.chat.service.chat_service import ChatService -from quivr_core.api.modules.dependencies import get_service - -brain_service = BrainService() -chat_service = get_service(ChatService)() - -logger = get_logger(__name__) - - -def format_brain_to_tool(brain): - return { - "type": "function", - "function": { - "name": str(brain.id), - "description": brain.description, - "parameters": { - "type": "object", - "properties": { - "question": { - "type": "string", - "description": "Question to ask the brain", - }, - }, - "required": ["question"], - }, - }, - } - - -class CompositeBrainQA( - KnowledgeBrainQA, -): - user_id: UUID - - def __init__( - self, - model: str, - brain_id: str, - chat_id: str, - streaming: bool = False, - prompt_id: 
Optional[UUID] = None, - **kwargs, - ): - user_id = kwargs.get("user_id") - if not user_id: - raise HTTPException(status_code=400, detail="Cannot find user id") - - super().__init__( - model=model, - brain_id=brain_id, - chat_id=chat_id, - streaming=streaming, - prompt_id=prompt_id, - **kwargs, - ) - self.user_id = user_id - - def get_answer_generator_from_brain_type(self, brain: BrainEntity): - if brain.brain_type == BrainType.composite: - return self.generate_answer - elif brain.brain_type == BrainType.api: - return APIBrainQA( - brain_id=str(brain.id), - chat_id=self.chat_id, - model=self.model, - max_tokens=self.max_tokens, - temperature=self.temperature, - streaming=self.streaming, - prompt_id=self.prompt_id, - user_id=str(self.user_id), - raw=brain.raw, - jq_instructions=brain.jq_instructions, - ).generate_answer - elif brain.brain_type == BrainType.doc: - return KnowledgeBrainQA( - brain_id=str(brain.id), - chat_id=self.chat_id, - max_tokens=self.max_tokens, - temperature=self.temperature, - streaming=self.streaming, - prompt_id=self.prompt_id, - ).generate_answer - - def generate_answer( - self, chat_id: UUID, question: ChatQuestion, save_answer: bool - ) -> str: - brain = brain_service.get_brain_by_id(question.brain_id) - - connected_brains = brain_service.get_connected_brains(self.brain_id) - - if not connected_brains: - response = HeadlessQA( - chat_id=chat_id, - model=self.model, - max_tokens=self.max_tokens, - temperature=self.temperature, - streaming=self.streaming, - prompt_id=self.prompt_id, - ).generate_answer(chat_id, question, save_answer=False) - if save_answer: - new_chat = chat_service.update_chat_history( - CreateChatHistory( - **{ - "chat_id": chat_id, - "user_message": question.question, - "assistant": response.assistant, - "brain_id": question.brain_id, - "prompt_id": self.prompt_to_use_id, - } - ) - ) - return GetChatHistoryOutput( - **{ - "chat_id": chat_id, - "user_message": question.question, - "assistant": response.assistant, - "message_time": new_chat.message_time, - "prompt_title": ( - self.prompt_to_use.title if self.prompt_to_use else None - ), - "brain_name": brain.name, - "message_id": new_chat.message_id, - "brain_id": str(brain.id), - } - ) - return GetChatHistoryOutput( - **{ - "chat_id": chat_id, - "user_message": question.question, - "assistant": response.assistant, - "message_time": None, - "prompt_title": ( - self.prompt_to_use.title if self.prompt_to_use else None - ), - "brain_name": brain.name, - "message_id": None, - "brain_id": str(brain.id), - } - ) - - tools = [] - available_functions = {} - - connected_brains_details = {} - for connected_brain_id in connected_brains: - connected_brain = brain_service.get_brain_by_id(connected_brain_id) - if connected_brain is None: - continue - - tools.append(format_brain_to_tool(connected_brain)) - - available_functions[connected_brain_id] = ( - self.get_answer_generator_from_brain_type(connected_brain) - ) - - connected_brains_details[str(connected_brain.id)] = connected_brain - - CHOOSE_BRAIN_FROM_TOOLS_PROMPT = ( - "Based on the provided user content, find the most appropriate tools to answer" - + "If you can't find any tool to answer and only then, and if you can answer without using any tool. 
In that case, let the user know that you are not using any particular brain (i.e tool) " - ) - - messages = [{"role": "system", "content": CHOOSE_BRAIN_FROM_TOOLS_PROMPT}] - - history = chat_service.get_chat_history(self.chat_id) - - for message in history: - formatted_message = [ - {"role": "user", "content": message.user_message}, - {"role": "assistant", "content": message.assistant}, - ] - messages.extend(formatted_message) - - messages.append({"role": "user", "content": question.question}) - - response = completion( - model="gpt-3.5-turbo-0125", - messages=messages, - tools=tools, - tool_choice="auto", - ) - - brain_completion_output = self.make_recursive_tool_calls( - messages, - question, - chat_id, - tools, - available_functions, - recursive_count=0, - last_completion_response=response.choices[0], - ) - - if brain_completion_output: - answer = brain_completion_output.response.message.content - new_chat = None - if save_answer: - new_chat = chat_service.update_chat_history( - CreateChatHistory( - **{ - "chat_id": chat_id, - "user_message": question.question, - "assistant": answer, - "brain_id": question.brain_id, - "prompt_id": self.prompt_to_use_id, - } - ) - ) - return GetChatHistoryOutput( - **{ - "chat_id": chat_id, - "user_message": question.question, - "assistant": brain_completion_output.response.message.content, - "message_time": new_chat.message_time if new_chat else None, - "prompt_title": ( - self.prompt_to_use.title if self.prompt_to_use else None - ), - "brain_name": brain.name if brain else None, - "message_id": new_chat.message_id if new_chat else None, - "brain_id": str(brain.id) if brain else None, - } - ) - - def make_recursive_tool_calls( - self, - messages, - question, - chat_id, - tools=[], - available_functions={}, - recursive_count=0, - last_completion_response: CompletionResponse = None, - ): - if recursive_count > 5: - print( - "The assistant is having issues and took more than 5 calls to the tools. Please try again later or an other instruction." - ) - return None - - finish_reason = last_completion_response.finish_reason - if finish_reason == "stop": - messages.append(last_completion_response.message) - return BrainCompletionOutput( - **{ - "messages": messages, - "question": question.question, - "response": last_completion_response, - } - ) - - if finish_reason == "tool_calls": - response_message: CompletionMessage = last_completion_response.message - tool_calls = response_message.tool_calls - - messages.append(response_message) - - if ( - len(tool_calls) == 0 - or tool_calls is None - or len(available_functions) == 0 - ): - return - - for tool_call in tool_calls: - function_name = tool_call.function.name - function_to_call = available_functions[function_name] - function_args = json.loads(tool_call.function.arguments) - question = ChatQuestion( - question=function_args["question"], brain_id=function_name - ) - - # TODO: extract chat_id from generate_answer function of XBrainQA - function_response = function_to_call( - chat_id=chat_id, - question=question, - save_answer=False, - ) - messages.append( - { - "tool_call_id": tool_call.id, - "role": "tool", - "name": function_name, - "content": function_response.assistant, - } - ) - - PROMPT_2 = "If initial question can be answered by our conversation messages, then give an answer and end the conversation." 
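The deleted make_recursive_tool_calls above is easier to read as a loop: call the model, execute any requested tools, feed the results back, and stop on finish_reason == "stop" or after five rounds. A condensed sketch of that control flow (the call_tool executor is hypothetical; the litellm call shape is copied from the surrounding code):

import json

from litellm import completion


def run_with_tools(messages, tools, call_tool, max_rounds=5):
    """call_tool(name, args) -> str is a caller-supplied executor (hypothetical)."""
    for _ in range(max_rounds):
        choice = completion(
            model="gpt-3.5-turbo-0125",
            messages=messages,
            tools=tools,
            tool_choice="auto",
        ).choices[0]
        if choice.finish_reason == "stop":
            return choice.message.content
        # finish_reason == "tool_calls": run each requested tool and feed
        # the result back as a "tool" message keyed by its tool_call_id.
        messages.append(choice.message)
        for tc in choice.message.tool_calls:
            result = call_tool(tc.function.name, json.loads(tc.function.arguments))
            messages.append(
                {
                    "tool_call_id": tc.id,
                    "role": "tool",
                    "name": tc.function.name,
                    "content": result,
                }
            )
    raise RuntimeError("exceeded max tool-call rounds")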
- - messages.append({"role": "system", "content": PROMPT_2}) - - for idx, msg in enumerate(messages): - logger.info( - f"Message {idx}: Role - {msg['role']}, Content - {msg['content']}" - ) - - response_after_tools_answers = completion( - model="gpt-3.5-turbo-0125", - messages=messages, - tools=tools, - tool_choice="auto", - ) - - return self.make_recursive_tool_calls( - messages, - question, - chat_id, - tools, - available_functions, - recursive_count=recursive_count + 1, - last_completion_response=response_after_tools_answers.choices[0], - ) - - async def generate_stream( - self, - chat_id: UUID, - question: ChatQuestion, - save_answer: bool, - should_log_steps: Optional[bool] = True, - ): - brain = brain_service.get_brain_by_id(question.brain_id) - if save_answer: - streamed_chat_history = chat_service.update_chat_history( - CreateChatHistory( - **{ - "chat_id": chat_id, - "user_message": question.question, - "assistant": "", - "brain_id": question.brain_id, - "prompt_id": self.prompt_to_use_id, - } - ) - ) - streamed_chat_history = GetChatHistoryOutput( - **{ - "chat_id": str(chat_id), - "message_id": streamed_chat_history.message_id, - "message_time": streamed_chat_history.message_time, - "user_message": question.question, - "assistant": "", - "prompt_title": ( - self.prompt_to_use.title if self.prompt_to_use else None - ), - "brain_name": brain.name if brain else None, - "brain_id": str(brain.id) if brain else None, - } - ) - else: - streamed_chat_history = GetChatHistoryOutput( - **{ - "chat_id": str(chat_id), - "message_id": None, - "message_time": None, - "user_message": question.question, - "assistant": "", - "prompt_title": ( - self.prompt_to_use.title if self.prompt_to_use else None - ), - "brain_name": brain.name if brain else None, - "brain_id": str(brain.id) if brain else None, - } - ) - - connected_brains = brain_service.get_connected_brains(self.brain_id) - - if not connected_brains: - headlesss_answer = HeadlessQA( - chat_id=chat_id, - model=self.model, - max_tokens=self.max_tokens, - temperature=self.temperature, - streaming=self.streaming, - prompt_id=self.prompt_id, - ).generate_stream(chat_id, question) - - response_tokens = [] - async for value in headlesss_answer: - streamed_chat_history.assistant = value - response_tokens.append(value) - yield f"data: {json.dumps(streamed_chat_history.dict())}" - - if save_answer: - chat_service.update_message_by_id( - message_id=str(streamed_chat_history.message_id), - user_message=question.question, - assistant="".join(response_tokens), - ) - - tools = [] - available_functions = {} - - connected_brains_details = {} - for brain_id in connected_brains: - brain = brain_service.get_brain_by_id(brain_id) - if brain == None: - continue - - tools.append(format_brain_to_tool(brain)) - - available_functions[brain_id] = self.get_answer_generator_from_brain_type( - brain - ) - - connected_brains_details[str(brain.id)] = brain - - CHOOSE_BRAIN_FROM_TOOLS_PROMPT = ( - "Based on the provided user content, find the most appropriate tools to answer" - + "If you can't find any tool to answer and only then, and if you can answer without using any tool. 
In that case, let the user know that you are not using any particular brain (i.e tool) " - ) - - messages = [{"role": "system", "content": CHOOSE_BRAIN_FROM_TOOLS_PROMPT}] - - history = chat_service.get_chat_history(self.chat_id) - - for message in history: - formatted_message = [ - {"role": "user", "content": message.user_message}, - {"role": "assistant", "content": message.assistant}, - ] - if message.assistant is None: - print(message) - messages.extend(formatted_message) - - messages.append({"role": "user", "content": question.question}) - - initial_response = completion( - model="gpt-3.5-turbo-0125", - stream=True, - messages=messages, - tools=tools, - tool_choice="auto", - ) - - response_tokens = [] - tool_calls_aggregate = [] - for chunk in initial_response: - content = chunk.choices[0].delta.content - if content is not None: - # Need to store it ? - streamed_chat_history.assistant = content - response_tokens.append(chunk.choices[0].delta.content) - - if save_answer: - yield f"data: {json.dumps(streamed_chat_history.dict())}" - else: - yield f"🧠<' {chunk.choices[0].delta.content}" - - if ( - "tool_calls" in chunk.choices[0].delta - and chunk.choices[0].delta.tool_calls is not None - ): - tool_calls = chunk.choices[0].delta.tool_calls - for tool_call in tool_calls: - id = tool_call.id - name = tool_call.function.name - if id and name: - tool_calls_aggregate += [ - { - "id": tool_call.id, - "function": { - "arguments": tool_call.function.arguments, - "name": tool_call.function.name, - }, - "type": "function", - } - ] - - else: - try: - tool_calls_aggregate[tool_call.index]["function"][ - "arguments" - ] += tool_call.function.arguments - except IndexError: - print("TOOL_CALL_INDEX error", tool_call.index) - print("TOOL_CALLS_AGGREGATE error", tool_calls_aggregate) - - finish_reason = chunk.choices[0].finish_reason - - if finish_reason == "stop": - if save_answer: - chat_service.update_message_by_id( - message_id=str(streamed_chat_history.message_id), - user_message=question.question, - assistant="".join( - [ - token - for token in response_tokens - if not token.startswith("🧠<") - ] - ), - ) - break - - if finish_reason == "tool_calls": - messages.append( - { - "role": "assistant", - "tool_calls": tool_calls_aggregate, - "content": None, - } - ) - for tool_call in tool_calls_aggregate: - function_name = tool_call["function"]["name"] - queried_brain = connected_brains_details[function_name] - function_to_call = available_functions[function_name] - function_args = json.loads(tool_call["function"]["arguments"]) - print("function_args", function_args["question"]) - question = ChatQuestion( - question=function_args["question"], brain_id=queried_brain.id - ) - - # yield f"🧠< Querying the brain {queried_brain.name} with the following arguments: {function_args} >🧠", - - print( - f"🧠< Querying the brain {queried_brain.name} with the following arguments: {function_args}", - ) - function_response = function_to_call( - chat_id=chat_id, - question=question, - save_answer=False, - ) - - messages.append( - { - "tool_call_id": tool_call["id"], - "role": "tool", - "name": function_name, - "content": function_response.assistant, - } - ) - - print("messages", messages) - - PROMPT_2 = "If the last user's question can be answered by our conversation messages since then, then give an answer and end the conversation. If you need to ask question to the user to gather more information and give a more accurate answer, then ask the question and wait for the user's answer." 
- # Otherwise, ask a new question to the assistant and choose brains you would like to ask questions." - - messages.append({"role": "system", "content": PROMPT_2}) - - response_after_tools_answers = completion( - model="gpt-3.5-turbo-0125", - messages=messages, - tools=tools, - tool_choice="auto", - stream=True, - ) - - response_tokens = [] - for chunk in response_after_tools_answers: - print("chunk_response_after_tools_answers", chunk) - content = chunk.choices[0].delta.content - if content: - streamed_chat_history.assistant = content - response_tokens.append(chunk.choices[0].delta.content) - yield f"data: {json.dumps(streamed_chat_history.dict())}" - - finish_reason = chunk.choices[0].finish_reason - - if finish_reason == "stop": - chat_service.update_message_by_id( - message_id=str(streamed_chat_history.message_id), - user_message=question.question, - assistant="".join( - [ - token - for token in response_tokens - if not token.startswith("🧠<") - ] - ), - ) - break - elif finish_reason is not None: - # TODO: recursively call with tools (update prompt + create intermediary function ) - print("NO STOP") - print(chunk.choices[0]) diff --git a/backend/core/quivr_core/api/modules/brain/controller/brain_routes.py b/backend/core/quivr_core/api/modules/brain/controller/brain_routes.py index 7f1d1e3e0811..0143fe9c2a89 100644 --- a/backend/core/quivr_core/api/modules/brain/controller/brain_routes.py +++ b/backend/core/quivr_core/api/modules/brain/controller/brain_routes.py @@ -1,4 +1,3 @@ -from typing import Dict from uuid import UUID from fastapi import APIRouter, Depends, HTTPException, Request @@ -128,65 +127,6 @@ async def update_existing_brain( return {"message": f"Brain {brain_id} has been updated."} -@brain_router.put( - "/brains/{brain_id}/secrets-values", - dependencies=[], - tags=["Brain"], -) -async def update_existing_brain_secrets( - brain_id: UUID, - secrets: Dict[str, str], - current_user: UserIdentity = Depends(get_current_user), -): - """Update an existing brain's secrets.""" - - existing_brain = brain_service.get_brain_details(brain_id, None) - - if existing_brain is None: - raise HTTPException(status_code=404, detail="Brain not found") - - if ( - existing_brain.brain_definition is None - or existing_brain.brain_definition.secrets is None - ): - raise HTTPException( - status_code=400, - detail="This brain does not support secrets.", - ) - - is_brain_user = ( - brain_user_service.get_brain_for_user( - user_id=current_user.id, - brain_id=brain_id, - ) - is not None - ) - - if not is_brain_user: - raise HTTPException( - status_code=403, - detail="You are not authorized to update this brain.", - ) - - secrets_names = [secret.name for secret in existing_brain.brain_definition.secrets] - - for key, value in secrets.items(): - if key not in secrets_names: - raise HTTPException( - status_code=400, - detail=f"Secret {key} is not a valid secret.", - ) - if value: - brain_service.update_secret_value( - user_id=current_user.id, - brain_id=brain_id, - secret_name=key, - secret_value=value, - ) - - return {"message": f"Brain {brain_id} has been updated."} - - @brain_router.post( "/brains/{brain_id}/documents", tags=["Brain"], diff --git a/backend/core/quivr_core/api/modules/brain/entity/integration_brain.py b/backend/core/quivr_core/api/modules/brain/entity/integration_brain.py index 61d46fd204b2..1b60b3f3af54 100644 --- a/backend/core/quivr_core/api/modules/brain/entity/integration_brain.py +++ b/backend/core/quivr_core/api/modules/brain/entity/integration_brain.py @@ -40,7 +40,7 @@ class 
IntegrationEntity(BaseModel): id: int user_id: str brain_id: str - integration_id: str + integration_id: UUID settings: Optional[dict] = None credentials: Optional[dict] = None last_synced: str diff --git a/backend/core/quivr_core/api/modules/brain/qa_headless.py b/backend/core/quivr_core/api/modules/brain/qa_headless.py deleted file mode 100644 index 0ea9cad03681..000000000000 --- a/backend/core/quivr_core/api/modules/brain/qa_headless.py +++ /dev/null @@ -1,270 +0,0 @@ -import asyncio -import json -from typing import AsyncIterable, Awaitable, List, Optional -from uuid import UUID - -from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler -from langchain.chains import LLMChain -from langchain.chat_models.base import BaseChatModel -from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate -from langchain_community.chat_models import ChatLiteLLM -from pydantic import BaseModel, ConfigDict - -from quivr_core.api.logger import get_logger -from quivr_core.api.models.settings import ( - BrainSettings, -) - -# Importing settings related to the 'brain' -from quivr_core.api.modules.brain.qa_interface import QAInterface -from quivr_core.api.modules.brain.service.utils.format_chat_history import ( - format_chat_history, - format_history_to_openai_mesages, -) -from quivr_core.api.modules.brain.service.utils.get_prompt_to_use_id import ( - get_prompt_to_use_id, -) -from quivr_core.api.modules.chat.dto.chats import ChatQuestion -from quivr_core.api.modules.chat.dto.inputs import CreateChatHistory -from quivr_core.api.modules.chat.dto.outputs import GetChatHistoryOutput -from quivr_core.api.modules.chat.service.chat_service import ChatService -from quivr_core.api.modules.dependencies import get_service -from quivr_core.api.modules.prompt.service.get_prompt_to_use import get_prompt_to_use - -logger = get_logger(__name__) -SYSTEM_MESSAGE = "Your name is Quivr. You're a helpful assistant. If you don't know the answer, just say that you don't know, don't try to make up an answer.When answering use markdown or any other techniques to display the content in a nice and aerated way." -chat_service = get_service(ChatService)() - - -class HeadlessQA(BaseModel, QAInterface): - brain_settings = BrainSettings() - model: str - temperature: float = 0.0 - max_tokens: int = 2000 - streaming: bool = False - chat_id: str - callbacks: Optional[List[AsyncIteratorCallbackHandler]] = None - prompt_id: Optional[UUID] = None - - def _determine_streaming(self, streaming: bool) -> bool: - """If the model name allows for streaming and streaming is declared, set streaming to True.""" - return streaming - - def _determine_callback_array( - self, streaming - ) -> List[AsyncIteratorCallbackHandler]: - """If streaming is set, set the AsyncIteratorCallbackHandler as the only callback.""" - if streaming: - return [AsyncIteratorCallbackHandler()] - else: - return [] - - def __init__(self, **data): - super().__init__(**data) - self.streaming = self._determine_streaming(self.streaming) - self.callbacks = self._determine_callback_array(self.streaming) - - @property - def prompt_to_use(self) -> str: - return get_prompt_to_use(None, self.prompt_id) - - @property - def prompt_to_use_id(self) -> Optional[UUID]: - return get_prompt_to_use_id(None, self.prompt_id) - - def _create_llm( - self, - model, - temperature=0, - streaming=False, - callbacks=None, - ) -> BaseChatModel: - """ - Determine the language model to be used. - :param model: Language model name to be used. 
- :param streaming: Whether to enable streaming of the model - :param callbacks: Callbacks to be used for streaming - :return: Language model instance - """ - api_base = None - if self.brain_settings.ollama_api_base_url and model.startswith("ollama"): - api_base = self.brain_settings.ollama_api_base_url - - return ChatLiteLLM( - temperature=temperature, - model=model, - streaming=streaming, - verbose=True, - callbacks=callbacks, - max_tokens=self.max_tokens, - api_base=api_base, - ) - - def _create_prompt_template(self): - messages = [ - HumanMessagePromptTemplate.from_template("{question}"), - ] - CHAT_PROMPT = ChatPromptTemplate.from_messages(messages) - return CHAT_PROMPT - - def generate_answer( - self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True - ) -> GetChatHistoryOutput: - # Move format_chat_history to chat service ? - transformed_history = format_chat_history( - chat_service.get_chat_history(self.chat_id) - ) - prompt_content = ( - self.prompt_to_use.content if self.prompt_to_use else SYSTEM_MESSAGE - ) - - messages = format_history_to_openai_mesages( - transformed_history, prompt_content, question.question - ) - answering_llm = self._create_llm( - model=self.model, - streaming=False, - callbacks=self.callbacks, - ) - model_prediction = answering_llm.predict_messages(messages) - answer = model_prediction.content - if save_answer: - new_chat = chat_service.update_chat_history( - CreateChatHistory( - **{ - "chat_id": chat_id, - "user_message": question.question, - "assistant": answer, - "brain_id": None, - "prompt_id": self.prompt_to_use_id, - } - ) - ) - - return GetChatHistoryOutput( - **{ - "chat_id": chat_id, - "user_message": question.question, - "assistant": answer, - "message_time": new_chat.message_time, - "prompt_title": ( - self.prompt_to_use.title if self.prompt_to_use else None - ), - "brain_name": None, - "message_id": new_chat.message_id, - } - ) - else: - return GetChatHistoryOutput( - **{ - "chat_id": chat_id, - "user_message": question.question, - "assistant": answer, - "message_time": None, - "prompt_title": ( - self.prompt_to_use.title if self.prompt_to_use else None - ), - "brain_name": None, - "message_id": None, - } - ) - - async def generate_stream( - self, chat_id: UUID, question: ChatQuestion, save_answer: bool = True - ) -> AsyncIterable: - callback = AsyncIteratorCallbackHandler() - self.callbacks = [callback] - - transformed_history = format_chat_history( - chat_service.get_chat_history(self.chat_id) - ) - prompt_content = ( - self.prompt_to_use.content if self.prompt_to_use else SYSTEM_MESSAGE - ) - - messages = format_history_to_openai_mesages( - transformed_history, prompt_content, question.question - ) - answering_llm = self._create_llm( - model=self.model, - streaming=True, - callbacks=self.callbacks, - ) - - CHAT_PROMPT = ChatPromptTemplate.from_messages(messages) - headlessChain = LLMChain(llm=answering_llm, prompt=CHAT_PROMPT) - - response_tokens = [] - - async def wrap_done(fn: Awaitable, event: asyncio.Event): - try: - await fn - except Exception as e: - logger.error(f"Caught exception: {e}") - finally: - event.set() - - run = asyncio.create_task( - wrap_done( - headlessChain.acall({}), - callback.done, - ), - ) - - if save_answer: - streamed_chat_history = chat_service.update_chat_history( - CreateChatHistory( - **{ - "chat_id": chat_id, - "user_message": question.question, - "assistant": "", - "brain_id": None, - "prompt_id": self.prompt_to_use_id, - } - ) - ) - - streamed_chat_history = GetChatHistoryOutput( - **{ - 
"chat_id": str(chat_id), - "message_id": streamed_chat_history.message_id, - "message_time": streamed_chat_history.message_time, - "user_message": question.question, - "assistant": "", - "prompt_title": ( - self.prompt_to_use.title if self.prompt_to_use else None - ), - "brain_name": None, - } - ) - else: - streamed_chat_history = GetChatHistoryOutput( - **{ - "chat_id": str(chat_id), - "message_id": None, - "message_time": None, - "user_message": question.question, - "assistant": "", - "prompt_title": ( - self.prompt_to_use.title if self.prompt_to_use else None - ), - "brain_name": None, - } - ) - - async for token in callback.aiter(): - response_tokens.append(token) - streamed_chat_history.assistant = token - yield f"data: {json.dumps(streamed_chat_history.dict())}" - - await run - assistant = "".join(response_tokens) - - if save_answer: - chat_service.update_message_by_id( - message_id=str(streamed_chat_history.message_id), - user_message=question.question, - assistant=assistant, - ) - - model_config = ConfigDict(arbitrary_types_allowed=True) diff --git a/backend/core/quivr_core/api/modules/brain/qa_interface.py b/backend/core/quivr_core/api/modules/brain/qa_interface.py deleted file mode 100644 index 7eac8497582c..000000000000 --- a/backend/core/quivr_core/api/modules/brain/qa_interface.py +++ /dev/null @@ -1,58 +0,0 @@ -from abc import ABC, abstractmethod -from uuid import UUID - -from quivr_core.api.modules.chat.dto.chats import ChatQuestion - - -def model_compatible_with_function_calling(model: str): - return model in [ - "gpt-4o", - "gpt-4-turbo", - "gpt-4-turbo-2024-04-09", - "gpt-4-turbo-preview", - "gpt-4-0125-preview", - "gpt-4-1106-preview", - "gpt-4", - "gpt-4-0613", - "gpt-3.5-turbo", - "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-1106", - "gpt-3.5-turbo-0613", - ] - - -class QAInterface(ABC): - """ - Abstract class for all QA interfaces. - This can be used to implement custom answer generation logic. 
- """ - - @abstractmethod - def calculate_pricing(self): - raise NotImplementedError( - "calculate_pricing is an abstract method and must be implemented" - ) - - @abstractmethod - def generate_answer( - self, - chat_id: UUID, - question: ChatQuestion, - save_answer: bool, - *custom_params: tuple, - ): - raise NotImplementedError( - "generate_answer is an abstract method and must be implemented" - ) - - @abstractmethod - async def generate_stream( - self, - chat_id: UUID, - question: ChatQuestion, - save_answer: bool, - *custom_params: tuple, - ): - raise NotImplementedError( - "generate_stream is an abstract method and must be implemented" - ) diff --git a/backend/core/quivr_core/api/modules/brain/repository/api_brain_definitions.py b/backend/core/quivr_core/api/modules/brain/repository/api_brain_definitions.py deleted file mode 100644 index 47c5e313843c..000000000000 --- a/backend/core/quivr_core/api/modules/brain/repository/api_brain_definitions.py +++ /dev/null @@ -1,57 +0,0 @@ -from typing import Optional -from uuid import UUID - -from quivr_core.api.models.settings import get_supabase_client -from quivr_core.api.modules.brain.dto.inputs import CreateApiBrainDefinition -from quivr_core.api.modules.brain.entity.api_brain_definition_entity import ( - ApiBrainDefinitionEntity, -) - - -class ApiBrainDefinitions: - def __init__(self): - self.db = get_supabase_client() - - def get_api_brain_definition( - self, brain_id: UUID - ) -> Optional[ApiBrainDefinitionEntity]: - response = ( - self.db.table("api_brain_definition") - .select("*") - .filter("brain_id", "eq", str(brain_id)) - .execute() - ) - if len(response.data) == 0: - return None - - return ApiBrainDefinitionEntity(**response.data[0]) - - def add_api_brain_definition( - self, brain_id: UUID, api_brain_definition: CreateApiBrainDefinition - ) -> Optional[ApiBrainDefinitionEntity]: - response = ( - self.db.table("api_brain_definition") - .insert([{"brain_id": str(brain_id), **api_brain_definition.dict()}]) - .execute() - ) - if len(response.data) == 0: - return None - return ApiBrainDefinitionEntity(**response.data[0]) - - def update_api_brain_definition( - self, brain_id: UUID, api_brain_definition: ApiBrainDefinitionEntity - ) -> Optional[ApiBrainDefinitionEntity]: - response = ( - self.db.table("api_brain_definition") - .update(api_brain_definition.dict(exclude={"brain_id"})) - .filter("brain_id", "eq", str(brain_id)) - .execute() - ) - if len(response.data) == 0: - return None - return ApiBrainDefinitionEntity(**response.data[0]) - - def delete_api_brain_definition(self, brain_id: UUID) -> None: - self.db.table("api_brain_definition").delete().filter( - "brain_id", "eq", str(brain_id) - ).execute() diff --git a/backend/core/quivr_core/api/modules/brain/repository/external_api_secrets.py b/backend/core/quivr_core/api/modules/brain/repository/external_api_secrets.py deleted file mode 100644 index 34cede7f14d6..000000000000 --- a/backend/core/quivr_core/api/modules/brain/repository/external_api_secrets.py +++ /dev/null @@ -1,57 +0,0 @@ -from uuid import UUID - -from quivr_core.api.models.settings import get_supabase_client - - -def build_secret_unique_name(user_id: UUID, brain_id: UUID, secret_name: str): - return f"{user_id}-{brain_id}-{secret_name}" - - -class ExternalApiSecrets: - def __init__(self): - supabase_client = get_supabase_client() - self.db = supabase_client - - def create_secret( - self, user_id: UUID, brain_id: UUID, secret_name: str, secret_value - ) -> UUID | None: - response = self.db.rpc( - "insert_secret", - { - 
"name": build_secret_unique_name( - user_id=user_id, brain_id=brain_id, secret_name=secret_name - ), - "secret": secret_value, - }, - ).execute() - - return response.data - - def read_secret( - self, - user_id: UUID, - brain_id: UUID, - secret_name: str, - ) -> UUID | None: - response = self.db.rpc( - "read_secret", - { - "secret_name": build_secret_unique_name( - user_id=user_id, brain_id=brain_id, secret_name=secret_name - ), - }, - ).execute() - - return response.data - - def delete_secret(self, user_id: UUID, brain_id: UUID, secret_name: str) -> bool: - response = self.db.rpc( - "delete_secret", - { - "secret_name": build_secret_unique_name( - user_id=user_id, brain_id=brain_id, secret_name=secret_name - ), - }, - ).execute() - - return response.data diff --git a/backend/core/quivr_core/api/modules/brain/repository/integration_brains.py b/backend/core/quivr_core/api/modules/brain/repository/integration_brains.py index aa00d32ce572..9e39dc486b7f 100644 --- a/backend/core/quivr_core/api/modules/brain/repository/integration_brains.py +++ b/backend/core/quivr_core/api/modules/brain/repository/integration_brains.py @@ -1,4 +1,5 @@ from typing import List +from uuid import UUID from quivr_core.api.models.settings import get_supabase_client from quivr_core.api.modules.brain.entity.integration_brain import ( @@ -102,11 +103,11 @@ class IntegrationDescription: def __init__(self): self.db = get_supabase_client() - def get_integration_description(self, integration_id): + def get_integration_description(self, integration_id: UUID): response = ( self.db.table("integrations") .select("*") - .filter("id", "eq", integration_id) + .filter("id", "eq", str(integration_id)) .execute() ) if len(response.data) == 0: @@ -114,12 +115,14 @@ def get_integration_description(self, integration_id): return IntegrationDescriptionEntity(**response.data[0]) - def get_integration_description_by_user_brain_id(self, brain_id, user_id): + def get_integration_description_by_user_brain_id( + self, brain_id: UUID, user_id: UUID + ): response = ( self.db.table("integrations_user") .select("*") - .filter("brain_id", "eq", brain_id) - .filter("user_id", "eq", user_id) + .filter("brain_id", "eq", str(brain_id)) + .filter("user_id", "eq", str(user_id)) .execute() ) if len(response.data) == 0: diff --git a/backend/core/quivr_core/api/modules/brain/service/api_brain_definition_service.py b/backend/core/quivr_core/api/modules/brain/service/api_brain_definition_service.py deleted file mode 100644 index b1349307ef1b..000000000000 --- a/backend/core/quivr_core/api/modules/brain/service/api_brain_definition_service.py +++ /dev/null @@ -1,36 +0,0 @@ -from typing import Optional -from uuid import UUID - -from quivr_core.api.modules.brain.dto.inputs import CreateApiBrainDefinition -from quivr_core.api.modules.brain.entity.api_brain_definition_entity import ( - ApiBrainDefinitionEntity, -) -from quivr_core.api.modules.brain.repository.api_brain_definitions import ( - ApiBrainDefinitions, -) - - -class ApiBrainDefinitionService: - - def __init__(self): - self.repository = ApiBrainDefinitions() - - def add_api_brain_definition( - self, brain_id: UUID, api_brain_definition: CreateApiBrainDefinition - ) -> None: - self.repository.add_api_brain_definition(brain_id, api_brain_definition) - - def delete_api_brain_definition(self, brain_id: UUID) -> None: - self.repository.delete_api_brain_definition(brain_id) - - def get_api_brain_definition( - self, brain_id: UUID - ) -> Optional[ApiBrainDefinitionEntity]: - return 
self.repository.get_api_brain_definition(brain_id) - - def update_api_brain_definition( - self, brain_id: UUID, api_brain_definition: ApiBrainDefinitionEntity - ) -> Optional[ApiBrainDefinitionEntity]: - return self.repository.update_api_brain_definition( - brain_id, api_brain_definition - ) diff --git a/backend/core/quivr_core/api/modules/brain/service/brain_service.py b/backend/core/quivr_core/api/modules/brain/service/brain_service.py index f57b70c23f36..12db36d95f3f 100644 --- a/backend/core/quivr_core/api/modules/brain/service/brain_service.py +++ b/backend/core/quivr_core/api/modules/brain/service/brain_service.py @@ -3,7 +3,6 @@ from fastapi import HTTPException -from quivr_core.api.celery_config import celery from quivr_core.api.logger import get_logger from quivr_core.api.modules.brain.dto.inputs import ( BrainUpdatableProperties, @@ -22,20 +21,12 @@ IntegrationBrain, IntegrationDescription, ) -from quivr_core.api.modules.brain.repository.external_api_secrets import ( - ExternalApiSecrets, -) -from quivr_core.api.modules.brain.service.api_brain_definition_service import ( - ApiBrainDefinitionService, -) -from quivr_core.api.modules.brain.service.utils.validate_brain import validate_api_brain from quivr_core.api.modules.knowledge.service.knowledge_service import KnowledgeService from quivr_core.api.vectorstore.supabase import CustomSupabaseVectorStore logger = get_logger(__name__) knowledge_service = KnowledgeService() -api_brain_definition_service = ApiBrainDefinitionService() class BrainService: @@ -46,7 +37,6 @@ def __init__(self): self.brain_vector = BrainsVectors() self.integration_brains_repository = IntegrationBrain() self.integration_description_repository = IntegrationDescription() - self.external_api_secrets_repository = ExternalApiSecrets() def get_brain_by_id(self, brain_id: UUID): return self.brain_repository.get_brain_by_id(brain_id) @@ -124,60 +114,12 @@ def create_brain( if brain is None: brain = CreateBrainProperties() # type: ignore model and brain_definition - if brain.brain_type == BrainType.api: - validate_api_brain(brain) - return self.create_brain_api(user_id, brain) - - if brain.brain_type == BrainType.composite: - return self.create_brain_composite(brain) - if brain.brain_type == BrainType.integration: return self.create_brain_integration(user_id, brain) created_brain = self.brain_repository.create_brain(brain) return created_brain - def create_brain_api( - self, - user_id: UUID, - brain: CreateBrainProperties, - ) -> BrainEntity: - created_brain = self.brain_repository.create_brain(brain) - - if brain.brain_definition is not None: - api_brain_definition_service.add_api_brain_definition( - brain_id=created_brain.brain_id, - api_brain_definition=brain.brain_definition, - ) - - secrets_values = brain.brain_secrets_values - - if secrets_values: - for secret_name in secrets_values: - self.external_api_secrets_repository.create_secret( - user_id=user_id, - brain_id=created_brain.brain_id, - secret_name=secret_name, - secret_value=secrets_values[secret_name], - ) - - return created_brain - - def create_brain_composite( - self, - brain: CreateBrainProperties, - ) -> BrainEntity: - created_brain = self.brain_repository.create_brain(brain) - - if brain.connected_brains_ids is not None: - for connected_brain_id in brain.connected_brains_ids: - self.composite_brains_connections_repository.connect_brain( - composite_brain_id=created_brain.brain_id, - connected_brain_id=connected_brain_id, - ) - - return created_brain - def create_brain_integration( self, user_id: UUID, 
@@ -191,50 +133,14 @@ def create_brain_integration( integration_id=brain.integration.integration_id, settings=brain.integration.settings, ) - if ( - self.integration_description_repository.get_integration_description( - brain.integration.integration_id - ).integration_name.lower() - == "notion" - ): - celery.send_task( - "NotionConnectorLoad", - kwargs={"brain_id": created_brain.brain_id, "user_id": user_id}, - ) return created_brain - def delete_brain_secrets_values(self, brain_id: UUID) -> None: - brain_definition = api_brain_definition_service.get_api_brain_definition( - brain_id=brain_id - ) - - if brain_definition is None: - raise HTTPException(status_code=404, detail="Brain definition not found.") - - secrets = brain_definition.secrets - - if len(secrets) > 0: - brain_users = self.brain_user_repository.get_brain_users(brain_id=brain_id) - for user in brain_users: - for secret in secrets: - self.external_api_secrets_repository.delete_secret( - user_id=user.user_id, - brain_id=brain_id, - secret_name=secret.name, - ) - def delete_brain(self, brain_id: UUID) -> dict[str, str]: brain_to_delete = self.get_brain_by_id(brain_id=brain_id) if brain_to_delete is None: raise HTTPException(status_code=404, detail="Brain not found.") - if brain_to_delete.brain_type == BrainType.api: - self.delete_brain_secrets_values( - brain_id=brain_id, - ) - api_brain_definition_service.delete_api_brain_definition(brain_id=brain_id) - else: - knowledge_service.remove_brain_all_knowledge(brain_id) + knowledge_service.remove_brain_all_knowledge(brain_id) self.brain_vector.delete_brain_vector(str(brain_id)) self.brain_user_repository.delete_brain_users(str(brain_id)) @@ -275,35 +181,6 @@ def update_brain_by_id( detail=f"Brain with id {brain_id} not found", ) - if ( - brain_update_answer.brain_type == BrainType.api - and brain_new_values.brain_definition - ): - existing_brain_secrets_definition = ( - existing_brain.brain_definition.secrets - if existing_brain.brain_definition - else None - ) - brain_new_values_secrets_definition = ( - brain_new_values.brain_definition.secrets - if brain_new_values.brain_definition - else None - ) - should_remove_existing_secrets_values = ( - existing_brain_secrets_definition - and brain_new_values_secrets_definition - and existing_brain_secrets_definition - != brain_new_values_secrets_definition - ) - - if should_remove_existing_secrets_values: - self.delete_brain_secrets_values(brain_id=brain_id) - - api_brain_definition_service.update_api_brain_definition( - brain_id, - api_brain_definition=brain_new_values.brain_definition, - ) - if brain_update_answer is None: raise HTTPException( status_code=404, @@ -340,30 +217,5 @@ def get_brain_details( return brain - def get_connected_brains(self, brain_id: UUID) -> list[BrainEntity]: - return self.composite_brains_connections_repository.get_connected_brains( - brain_id - ) - def get_public_brains(self) -> list[PublicBrain]: return self.brain_repository.get_public_brains() - - def update_secret_value( - self, - user_id: UUID, - brain_id: UUID, - secret_name: str, - secret_value: str, - ) -> None: - """Update an existing secret.""" - self.external_api_secrets_repository.delete_secret( - user_id=user_id, - brain_id=brain_id, - secret_name=secret_name, - ) - self.external_api_secrets_repository.create_secret( - user_id=user_id, - brain_id=brain_id, - secret_name=secret_name, - secret_value=secret_value, - ) diff --git a/backend/core/quivr_core/api/modules/brain/service/brain_user_service.py 
b/backend/core/quivr_core/api/modules/brain/service/brain_user_service.py index 86c76917ecfd..ac528e262ae0 100644 --- a/backend/core/quivr_core/api/modules/brain/service/brain_user_service.py +++ b/backend/core/quivr_core/api/modules/brain/service/brain_user_service.py @@ -2,28 +2,21 @@ from uuid import UUID from fastapi import HTTPException + from quivr_core.api.logger import get_logger from quivr_core.api.modules.brain.entity.brain_entity import ( BrainEntity, - BrainType, BrainUser, MinimalUserBrainEntity, RoleEnum, ) from quivr_core.api.modules.brain.repository.brains import Brains from quivr_core.api.modules.brain.repository.brains_users import BrainsUsers -from quivr_core.api.modules.brain.repository.external_api_secrets import ( - ExternalApiSecrets, -) -from quivr_core.api.modules.brain.service.api_brain_definition_service import ( - ApiBrainDefinitionService, -) from quivr_core.api.modules.brain.service.brain_service import BrainService logger = get_logger(__name__) brain_service = BrainService() -api_brain_definition_service = ApiBrainDefinitionService() class BrainUserService: @@ -31,7 +24,6 @@ class BrainUserService: def __init__(self): self.brain_repository = Brains() self.brain_user_repository = BrainsUsers() - self.external_api_secrets_repository = ExternalApiSecrets() def get_user_default_brain(self, user_id: UUID) -> BrainEntity | None: brain_id = self.brain_user_repository.get_user_default_brain_id(user_id) @@ -46,22 +38,6 @@ def delete_brain_user(self, user_id: UUID, brain_id: UUID) -> None: if brain_to_delete_user_from is None: raise HTTPException(status_code=404, detail="Brain not found.") - if brain_to_delete_user_from.brain_type == BrainType.api: - brain_definition = api_brain_definition_service.get_api_brain_definition( - brain_id=brain_id - ) - if brain_definition is None: - raise HTTPException( - status_code=404, detail="Brain definition not found." 
- ) - secrets = brain_definition.secrets - for secret in secrets: - self.external_api_secrets_repository.delete_secret( - user_id=user_id, - brain_id=brain_id, - secret_name=secret.name, - ) - self.brain_user_repository.delete_brain_user_by_id( user_id=user_id, brain_id=brain_id, diff --git a/backend/core/quivr_core/api/modules/brain/service/brain_vector_service.py b/backend/core/quivr_core/api/modules/brain/service/brain_vector_service.py index 90929c79d88a..bed5e3a82de2 100644 --- a/backend/core/quivr_core/api/modules/brain/service/brain_vector_service.py +++ b/backend/core/quivr_core/api/modules/brain/service/brain_vector_service.py @@ -1,11 +1,7 @@ -from typing import Any, List from uuid import UUID from quivr_core.api.logger import get_logger from quivr_core.api.modules.brain.repository.brains_vectors import BrainsVectors -from quivr_core.api.modules.brain.repository.interfaces.brains_vectors_interface import ( - BrainsVectorsInterface, -) from quivr_core.api.modules.knowledge.repository.storage import Storage from quivr_core.api.packages.embeddings.vectors import get_unique_files_from_vector_ids @@ -13,13 +9,11 @@ class BrainVectorService: - repository: BrainsVectorsInterface - id: UUID - files: List[Any] = [] def __init__(self, brain_id: UUID): self.repository = BrainsVectors() self.id = brain_id + self.files = [] def create_brain_vector(self, vector_id, file_sha1): return self.repository.create_brain_vector(self.id, vector_id, file_sha1) # type: ignore @@ -27,7 +21,7 @@ def create_brain_vector(self, vector_id, file_sha1): def update_brain_with_file(self, file_sha1: str): # not used vector_ids = self.repository.get_vector_ids_from_file_sha1(file_sha1) - if vector_ids == None or len(vector_ids) == 0: + if vector_ids is None or len(vector_ids) == 0: logger.info(f"No vector ids found for file {file_sha1}") return diff --git a/backend/core/quivr_core/api/modules/brain/service/call_brain_api.py b/backend/core/quivr_core/api/modules/brain/service/call_brain_api.py deleted file mode 100644 index b497a67e3431..000000000000 --- a/backend/core/quivr_core/api/modules/brain/service/call_brain_api.py +++ /dev/null @@ -1,115 +0,0 @@ -from uuid import UUID - -import requests -from fastapi import HTTPException - -from quivr_core.api.logger import get_logger -from quivr_core.api.modules.brain.entity.api_brain_definition_entity import ( - ApiBrainDefinitionSchema, -) -from quivr_core.api.modules.brain.service.api_brain_definition_service import ( - ApiBrainDefinitionService, -) -from quivr_core.api.modules.brain.service.brain_service import BrainService - -logger = get_logger(__name__) -brain_service = BrainService() -api_brain_definition_service = ApiBrainDefinitionService() - - -def get_api_call_response_as_text( - method, api_url, params, search_params, secrets -) -> str | None: - headers = {} - - api_url_with_search_params = api_url - if search_params: - api_url_with_search_params += "?" 
- for search_param in search_params: - api_url_with_search_params += ( - f"{search_param}={search_params[search_param]}&" - ) - - for secret in secrets: - headers[secret] = secrets[secret] - - try: - if method in ["GET", "DELETE"]: - response = requests.request( - method, - url=api_url_with_search_params, - params=params or None, - headers=headers or None, - ) - elif method in ["POST", "PUT", "PATCH"]: - response = requests.request( - method, - url=api_url_with_search_params, - json=params or None, - headers=headers or None, - ) - else: - raise ValueError(f"Invalid method: {method}") - - return response.text - - except Exception as e: - logger.error(f"Error calling API: {e}") - return None - - -def extract_api_brain_definition_values_from_llm_output( - brain_schema: ApiBrainDefinitionSchema, arguments: dict -) -> dict: - params_values = {} - properties = brain_schema.properties - required_values = brain_schema.required - for property in properties: - if property.name in arguments: - if property.type == "number": - params_values[property.name] = float(arguments[property.name]) - else: - params_values[property.name] = arguments[property.name] - continue - - if property.name in required_values: - raise HTTPException( - status_code=400, - detail=f"Required parameter {property.name} not found in arguments", - ) - - return params_values - - -def call_brain_api(brain_id: UUID, user_id: UUID, arguments: dict) -> str | None: - brain_definition = api_brain_definition_service.get_api_brain_definition(brain_id) - - if brain_definition is None: - raise HTTPException( - status_code=404, detail=f"Brain definition {brain_id} not found" - ) - - brain_params_values = extract_api_brain_definition_values_from_llm_output( - brain_definition.params, arguments - ) - - brain_search_params_values = extract_api_brain_definition_values_from_llm_output( - brain_definition.search_params, arguments - ) - - secrets = brain_definition.secrets - secrets_values = {} - - for secret in secrets: - secret_value = brain_service.external_api_secrets_repository.read_secret( - user_id=user_id, brain_id=brain_id, secret_name=secret.name - ) - secrets_values[secret.name] = secret_value - - return get_api_call_response_as_text( - api_url=brain_definition.url, - params=brain_params_values, - search_params=brain_search_params_values, - secrets=secrets_values, - method=brain_definition.method, - ) diff --git a/backend/core/quivr_core/api/modules/brain/service/get_api_brain_definition_as_json_schema.py b/backend/core/quivr_core/api/modules/brain/service/get_api_brain_definition_as_json_schema.py deleted file mode 100644 index a5c90f9a209e..000000000000 --- a/backend/core/quivr_core/api/modules/brain/service/get_api_brain_definition_as_json_schema.py +++ /dev/null @@ -1,65 +0,0 @@ -import re - -from fastapi import HTTPException - -from quivr_core.api.modules.brain.entity.api_brain_definition_entity import ( - ApiBrainDefinitionSchemaProperty, -) -from quivr_core.api.modules.brain.entity.brain_entity import BrainEntity -from quivr_core.api.modules.brain.service.api_brain_definition_service import ( - ApiBrainDefinitionService, -) - -api_brain_definition_service = ApiBrainDefinitionService() - - -def sanitize_function_name(string): - sanitized_string = re.sub(r"[^a-zA-Z0-9_-]", "", string) - - return sanitized_string - - -def format_api_brain_property(property: ApiBrainDefinitionSchemaProperty): - property_data: dict = { - "type": property.type, - "description": property.description, - } - if property.enum: - property_data["enum"] = 
property.enum - return property_data - - -def get_api_brain_definition_as_json_schema(brain: BrainEntity): - api_brain_definition = api_brain_definition_service.get_api_brain_definition( - brain.id - ) - if not api_brain_definition: - raise HTTPException( - status_code=404, detail=f"Brain definition {brain.id} not found" - ) - - required = [] - required.extend(api_brain_definition.params.required) - required.extend(api_brain_definition.search_params.required) - properties = {} - - api_properties = ( - api_brain_definition.params.properties - + api_brain_definition.search_params.properties - ) - - for property in api_properties: - properties[property.name] = format_api_brain_property(property) - - parameters = { - "type": "object", - "properties": properties, - "required": required, - } - schema = { - "name": sanitize_function_name(brain.name), - "description": brain.description, - "parameters": parameters, - } - - return schema diff --git a/backend/core/quivr_core/api/modules/brain/service/get_question_context_from_brain.py b/backend/core/quivr_core/api/modules/brain/service/get_question_context_from_brain.py index f1abfc032cc1..717a6655c20d 100644 --- a/backend/core/quivr_core/api/modules/brain/service/get_question_context_from_brain.py +++ b/backend/core/quivr_core/api/modules/brain/service/get_question_context_from_brain.py @@ -51,6 +51,7 @@ def get_question_context_from_brain(brain_id: UUID, question: str) -> list[str]: if document.metadata["file_sha1"] not in file_sha1s: file_sha1s.append(document.metadata["file_sha1"]) file_path_in_storage = f"{brain_id}/{document.metadata['file_name']}" + signed_url = generate_file_signed_url(file_path_in_storage) answers.append( DocumentAnswer( file_name=document.metadata["file_name"], @@ -58,9 +59,7 @@ def get_question_context_from_brain(brain_id: UUID, question: str) -> list[str]: file_size=document.metadata["file_size"], file_id=document.metadata["id"], file_similarity=document.metadata["similarity"], - file_url=generate_file_signed_url(file_path_in_storage).get( - "signedURL", "" - ), + file_url=signed_url.get("signedURL", "") if signed_url else "", ), ) diff --git a/backend/core/quivr_core/api/modules/brain/service/integration_brain_service.py b/backend/core/quivr_core/api/modules/brain/service/integration_brain_service.py index 7fef86900d4e..4ce8569e67f0 100644 --- a/backend/core/quivr_core/api/modules/brain/service/integration_brain_service.py +++ b/backend/core/quivr_core/api/modules/brain/service/integration_brain_service.py @@ -4,13 +4,9 @@ from quivr_core.api.modules.brain.repository.integration_brains import ( IntegrationDescription, ) -from quivr_core.api.modules.brain.repository.interfaces import ( - IntegrationDescriptionInterface, -) class IntegrationBrainDescriptionService: - repository: IntegrationDescriptionInterface def __init__(self): self.repository = IntegrationDescription() @@ -20,12 +16,12 @@ def get_all_integration_descriptions(self) -> list[IntegrationDescriptionEntity] def get_integration_description( self, integration_id - ) -> IntegrationDescriptionEntity: + ) -> IntegrationDescriptionEntity | None: return self.repository.get_integration_description(integration_id) def get_integration_description_by_user_brain_id( self, brain_id, user_id - ) -> IntegrationDescriptionEntity: + ) -> IntegrationDescriptionEntity | None: return self.repository.get_integration_description_by_user_brain_id( brain_id, user_id ) diff --git a/backend/core/quivr_core/api/modules/brain/service/test_brain_service.py 
b/backend/core/quivr_core/api/modules/brain/tests/test_brain_service.py similarity index 100% rename from backend/core/quivr_core/api/modules/brain/service/test_brain_service.py rename to backend/core/quivr_core/api/modules/brain/tests/test_brain_service.py From 08637027e1f565f533b330a8f6e9eada66690e5e Mon Sep 17 00:00:00 2001 From: aminediro Date: Fri, 28 Jun 2024 17:50:05 +0200 Subject: [PATCH 10/20] removed email logic --- .../chat/repository/chats_interface.py | 93 ------------------- .../api/packages/emails/__init__.py | 0 .../api/packages/emails/send_email.py | 11 --- 3 files changed, 104 deletions(-) delete mode 100644 backend/core/quivr_core/api/modules/chat/repository/chats_interface.py delete mode 100644 backend/core/quivr_core/api/packages/emails/__init__.py delete mode 100644 backend/core/quivr_core/api/packages/emails/send_email.py diff --git a/backend/core/quivr_core/api/modules/chat/repository/chats_interface.py b/backend/core/quivr_core/api/modules/chat/repository/chats_interface.py deleted file mode 100644 index aff734cd5c9e..000000000000 --- a/backend/core/quivr_core/api/modules/chat/repository/chats_interface.py +++ /dev/null @@ -1,93 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Optional -from uuid import UUID - -from quivr_core.api.modules.chat.dto.inputs import ( - ChatMessageProperties, - CreateChatHistory, - QuestionAndAnswer, -) -from quivr_core.api.modules.chat.entity.chat import Chat - - -class ChatsInterface(ABC): - @abstractmethod - def create_chat(self, new_chat): - """ - Insert a chat entry in "chats" db - """ - pass - - @abstractmethod - def get_chat_by_id(self, chat_id: str): - """ - Get chat details by chat_id - """ - pass - - @abstractmethod - def add_question_and_answer( - self, chat_id: UUID, question_and_answer: QuestionAndAnswer - ) -> Optional[Chat]: - """ - Add a question and answer to the chat history - """ - pass - - @abstractmethod - def get_chat_history(self, chat_id: str): - """ - Get chat history by chat_id - """ - pass - - @abstractmethod - def get_user_chats(self, user_id: str): - """ - Get all chats for a user - """ - pass - - @abstractmethod - def update_chat_history(self, chat_history: CreateChatHistory): - """ - Update chat history - """ - pass - - @abstractmethod - def update_chat(self, chat_id, updates): - """ - Update chat details - """ - pass - - @abstractmethod - def update_message_by_id(self, message_id, updates): - """ - Update message details - """ - pass - - @abstractmethod - def delete_chat(self, chat_id): - """ - Delete chat - """ - pass - - @abstractmethod - def delete_chat_history(self, chat_id): - """ - Delete chat history - """ - pass - - @abstractmethod - def update_chat_message( - self, chat_id, message_id, chat_message_properties: ChatMessageProperties - ): - """ - Update chat message - """ - pass diff --git a/backend/core/quivr_core/api/packages/emails/__init__.py b/backend/core/quivr_core/api/packages/emails/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/backend/core/quivr_core/api/packages/emails/send_email.py b/backend/core/quivr_core/api/packages/emails/send_email.py deleted file mode 100644 index 2d2b7639aae0..000000000000 --- a/backend/core/quivr_core/api/packages/emails/send_email.py +++ /dev/null @@ -1,11 +0,0 @@ -from typing import Dict - -import resend - -from quivr_core.models.settings import ResendSettings - - -def send_email(params: Dict): - settings = ResendSettings() - resend.api_key = settings.resend_api_key - return resend.Emails.send(params) From 
96cad09d7c564e0865039791be1d82f53b9779f2 Mon Sep 17 00:00:00 2001 From: aminediro Date: Mon, 1 Jul 2024 10:22:35 +0200 Subject: [PATCH 11/20] user service --- .../core/quivr_core/api/models/settings.py | 5 +- .../user/controller/user_controller.py | 36 +------ .../api/modules/user/repository/__init__.py | 2 +- .../api/modules/user/repository/users.py | 53 ++++++---- .../user/repository/users_interface.py | 63 ------------ .../api/modules/user/service/user_service.py | 12 +-- .../api/modules/user/service/user_usage.py | 99 ------------------- 7 files changed, 48 insertions(+), 222 deletions(-) delete mode 100644 backend/core/quivr_core/api/modules/user/repository/users_interface.py delete mode 100644 backend/core/quivr_core/api/modules/user/service/user_usage.py diff --git a/backend/core/quivr_core/api/models/settings.py b/backend/core/quivr_core/api/models/settings.py index 8425a1144195..d3bb3bf9248a 100644 --- a/backend/core/quivr_core/api/models/settings.py +++ b/backend/core/quivr_core/api/models/settings.py @@ -7,11 +7,10 @@ from langchain_openai import OpenAIEmbeddings from posthog import Posthog from pydantic_settings import BaseSettings, SettingsConfigDict -from sqlalchemy import Engine, create_engine -from supabase.client import Client, create_client - from quivr_core.api.logger import get_logger from quivr_core.api.models.databases.supabase.supabase import SupabaseDB +from sqlalchemy import Engine, create_engine +from supabase.client import Client, create_client logger = get_logger(__name__) diff --git a/backend/core/quivr_core/api/modules/user/controller/user_controller.py b/backend/core/quivr_core/api/modules/user/controller/user_controller.py index 737b08e6dacd..53eec7766d60 100644 --- a/backend/core/quivr_core/api/modules/user/controller/user_controller.py +++ b/backend/core/quivr_core/api/modules/user/controller/user_controller.py @@ -1,18 +1,16 @@ from fastapi import APIRouter, Depends, Request - -from quivr_core.api.middlewares.auth import AuthBearer, get_current_user from quivr_core.api.modules.brain.service.brain_user_service import BrainUserService +from quivr_core.api.modules.dependencies import get_current_user from quivr_core.api.modules.user.dto.inputs import UserUpdatableProperties from quivr_core.api.modules.user.entity.user_identity import UserIdentity -from quivr_core.api.modules.user.repository.users import Users -from quivr_core.api.modules.user.service.user_usage import UserUsage +from quivr_core.api.modules.user.repository.users import UserRepository user_router = APIRouter() brain_user_service = BrainUserService() -user_repository = Users() +user_repository = UserRepository() -@user_router.get("/user", dependencies=[Depends(AuthBearer())], tags=["User"]) +@user_router.get("/user", tags=["User"]) async def get_user_endpoint( request: Request, current_user: UserIdentity = Depends(get_current_user) ): @@ -28,16 +26,7 @@ async def get_user_endpoint( information about the user's API usage. 
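With the router-level `AuthBearer` guards removed above, authentication now rides entirely on the `get_current_user` parameter dependency. A sketch of the pattern with a stand-in dependency body (the real implementation lives in `modules/dependencies.py`, which this diff does not show):

    from fastapi import APIRouter, Depends, HTTPException
    from pydantic import BaseModel

    class UserIdentity(BaseModel):  # minimal stand-in for the real entity
        id: str
        email: str | None = None

    async def get_current_user() -> UserIdentity:
        # Stand-in body: the real dependency validates the bearer token and
        # returns the authenticated identity, raising 401 otherwise.
        raise HTTPException(status_code=401, detail="Not authenticated")

    user_router = APIRouter()

    @user_router.get("/user", tags=["User"])
    async def get_user(current_user: UserIdentity = Depends(get_current_user)):
        # The parameter dependency both authenticates the request and
        # injects the identity; no separate router-level guard is needed.
        return {"id": current_user.id, "email": current_user.email}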
""" - user_daily_usage = UserUsage( - id=current_user.id, - email=current_user.email, - ) user_settings = user_daily_usage.get_user_settings() - max_brain_size = user_settings.get("max_brain_size", 1000000000) - - monthly_chat_credit = user_settings.get("monthly_chat_credit", 10) - - user_daily_usage = UserUsage(id=current_user.id) return { "email": current_user.email, @@ -52,7 +41,6 @@ async def get_user_endpoint( @user_router.put( "/user/identity", - dependencies=[Depends(AuthBearer())], tags=["User"], ) def update_user_identity_route( @@ -69,7 +57,6 @@ def update_user_identity_route( @user_router.get( "/user/identity", - dependencies=[Depends(AuthBearer())], tags=["User"], ) def get_user_identity_route( @@ -83,7 +70,6 @@ def get_user_identity_route( @user_router.delete( "/user_data", - dependencies=[Depends(AuthBearer())], tags=["User"], ) async def delete_user_data_route( @@ -100,17 +86,3 @@ async def delete_user_data_route( user_repository.delete_user_data(current_user.id) return {"message": "User deleted successfully"} - - -@user_router.get( - "/user/credits", - dependencies=[Depends(AuthBearer())], - tags=["User"], -) -def get_user_credits( - current_user: UserIdentity = Depends(get_current_user), -) -> int: - """ - Get user remaining credits. - """ - return user_repository.get_user_credits(current_user.id) diff --git a/backend/core/quivr_core/api/modules/user/repository/__init__.py b/backend/core/quivr_core/api/modules/user/repository/__init__.py index dba60627d7a8..699c705f8285 100644 --- a/backend/core/quivr_core/api/modules/user/repository/__init__.py +++ b/backend/core/quivr_core/api/modules/user/repository/__init__.py @@ -1 +1 @@ -from .users import Users +from .users import UserRepository diff --git a/backend/core/quivr_core/api/modules/user/repository/users.py b/backend/core/quivr_core/api/modules/user/repository/users.py index dbc40a411984..0ac72eb942f0 100644 --- a/backend/core/quivr_core/api/modules/user/repository/users.py +++ b/backend/core/quivr_core/api/modules/user/repository/users.py @@ -1,15 +1,15 @@ -import time +from uuid import UUID from quivr_core.api.models.settings import get_supabase_client +from quivr_core.api.modules.dependencies import BaseRepository from quivr_core.api.modules.user.entity.user_identity import UserIdentity -from quivr_core.api.modules.user.repository.users_interface import UsersInterface -from quivr_core.api.modules.user.service import user_usage +from sqlmodel.ext.asyncio.session import AsyncSession -class Users(UsersInterface): - def __init__(self): - supabase_client = get_supabase_client() - self.db = supabase_client +class UserRepository(BaseRepository): + def __init__(self, session: AsyncSession): + super().__init__(session) + self.db = get_supabase_client() def create_user_identity(self, id): response = ( @@ -77,7 +77,7 @@ def get_user_email_by_user_id(self, user_id): ).execute() return response.data[0]["email"] - def delete_user_data(self, user_id): + def delete_user_data(self, user_id: UUID): response = ( self.db.from_("brains_users") .select("brain_id") @@ -110,18 +110,35 @@ def delete_user_data(self, user_id): ).execute() self.db.table("users").delete().filter("id", "eq", str(user_id)).execute() - def get_user_credits(self, user_id): - user_usage_instance = user_usage.UserUsage(id=user_id) + def get_user_settings(self, user_id): + """ + Fetch the user settings from the database + """ - user_monthly_usage = user_usage_instance.get_user_monthly_usage( - time.strftime("%Y%m%d") - ) - monthly_chat_credit = ( + user_settings_response 
= ( self.db.from_("user_settings") - .select("monthly_chat_credit") + .select("*") .filter("user_id", "eq", str(user_id)) .execute() - .data[0]["monthly_chat_credit"] - ) + ).data + + if len(user_settings_response) == 0: + # Create the user settings + user_settings_response = ( + self.db.table("user_settings") + .insert({"user_id": str(user_id)}) + .execute() + ).data + + if len(user_settings_response) == 0: + raise ValueError("User settings could not be created") + + user_settings = user_settings_response[0] + + return user_settings - return monthly_chat_credit - user_monthly_usage + def get_models(self): + model_settings_response = (self.db.from_("models").select("*").execute()).data + if len(model_settings_response) == 0: + raise ValueError("An issue occured while fetching the model settings") + return model_settings_response diff --git a/backend/core/quivr_core/api/modules/user/repository/users_interface.py b/backend/core/quivr_core/api/modules/user/repository/users_interface.py deleted file mode 100644 index 31e7abab6ea3..000000000000 --- a/backend/core/quivr_core/api/modules/user/repository/users_interface.py +++ /dev/null @@ -1,63 +0,0 @@ -from abc import ABC, abstractmethod -from uuid import UUID - -from quivr_core.api.modules.user.dto.inputs import UserUpdatableProperties -from quivr_core.api.modules.user.entity.user_identity import UserIdentity - - -class UsersInterface(ABC): - @abstractmethod - def create_user_identity(self, id: UUID) -> UserIdentity: - """ - Create a user identity - """ - pass - - @abstractmethod - def update_user_properties( - self, - user_id: UUID, - user_identity_updatable_properties: UserUpdatableProperties, - ) -> UserIdentity: - """ - Update the user properties - """ - pass - - @abstractmethod - def get_user_identity(self, user_id: UUID) -> UserIdentity: - """ - Get the user identity - """ - pass - - @abstractmethod - def get_user_id_by_user_email(self, email: str) -> UUID | None: - """ - Get the user id by user email - """ - pass - - @abstractmethod - def get_user_email_by_user_id(self, user_id: UUID) -> str: - """ - Get the user email by user id - """ - pass - - @abstractmethod - def delete_user_data(self, user_id: str): - """ - Delete a user. - - - `user_id`: The ID of the user to delete. - - This endpoint deletes a user from the system. 
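`UserRepository` above now receives an `AsyncSession` through a shared `BaseRepository`, and `UserService` (just below) becomes a `BaseService[UserRepository]`. The dependencies module itself is outside this diff, so the following is only a guess at its minimal shape, using the names visible at the call sites:

    from typing import Generic, Type, TypeVar

    from sqlmodel.ext.asyncio.session import AsyncSession

    class BaseRepository:
        def __init__(self, session: AsyncSession):
            # Repositories share one request-scoped async session, even when
            # individual methods still go through the Supabase client.
            self.session = session

    R = TypeVar("R", bound=BaseRepository)

    class BaseService(Generic[R]):
        # Each service declares the repository class it is built from, so a
        # generic factory can assemble service -> repository -> session.
        repository_cls: Type[R]

        def __init__(self, repository: R):
            self.repository = repository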
- """ - - @abstractmethod - def get_user_credits(self, user_id: UUID) -> int: - """ - Get user remaining credits - """ - pass diff --git a/backend/core/quivr_core/api/modules/user/service/user_service.py b/backend/core/quivr_core/api/modules/user/service/user_service.py index a4a6a66c827a..8f83aae3cb19 100644 --- a/backend/core/quivr_core/api/modules/user/service/user_service.py +++ b/backend/core/quivr_core/api/modules/user/service/user_service.py @@ -1,14 +1,14 @@ from uuid import UUID -from quivr_core.api.modules.user.repository.users import Users -from quivr_core.api.modules.user.repository.users_interface import UsersInterface +from quivr_api.modules.dependencies import BaseService +from quivr_core.api.modules.user.repository.users import UserRepository -class UserService: - repository: UsersInterface +class UserService(BaseService[UserRepository]): + repository_cls = UserRepository - def __init__(self): - self.repository = Users() + def __init__(self, repository: UserRepository): + self.repository = repository def get_user_id_by_email(self, email: str) -> UUID | None: return self.repository.get_user_id_by_user_email(email) diff --git a/backend/core/quivr_core/api/modules/user/service/user_usage.py b/backend/core/quivr_core/api/modules/user/service/user_usage.py deleted file mode 100644 index e5543f18c041..000000000000 --- a/backend/core/quivr_core/api/modules/user/service/user_usage.py +++ /dev/null @@ -1,99 +0,0 @@ -from quivr_core.api.logger import get_logger -from quivr_core.api.models.databases.supabase.supabase import SupabaseDB -from quivr_core.api.models.settings import PostHogSettings, get_supabase_db -from quivr_core.api.modules.user.entity.user_identity import UserIdentity - -logger = get_logger(__name__) - - -class UserUsage(UserIdentity): - daily_requests_count: int = 0 - - def __init__(self, **data): - super().__init__(**data) - - @property - def supabase_db(self) -> SupabaseDB: - return get_supabase_db() - - def get_user_daily_usage(self): - """ - Fetch the user request stats from the database - """ - request = self.supabase_db.get_user_usage(self.id) - return request - - def get_models(self): - """ - Fetch the user request stats from the database - """ - request = self.supabase_db.get_models() - - return request - - def get_user_settings(self): - """ - Fetch the user settings from the database - """ - posthog = PostHogSettings() - request = self.supabase_db.get_user_settings(self.id) - if request is not None and request.get("is_premium", False): - posthog.set_once_user_properties( - self.id, "HAS_OR_HAD_PREMIUM", {"is_was_premium": "true"} - ) - posthog.set_user_properties( - self.id, "CURRENT_PREMIUM", {"is_premium": "true"} - ) - else: - posthog.set_user_properties( - self.id, "CURRENT_PREMIUM", {"is_premium": "false"} - ) - - return request - - def get_user_monthly_usage(self, date): - """ - Fetch the user monthly usage from the database - """ - posthog = PostHogSettings() - request = self.supabase_db.get_user_requests_count_for_month(self.id, date) - posthog.set_user_properties( - self.id, "MONTHLY_USAGE", {"monthly_chat_usage": request} - ) - - return request - - def handle_increment_user_request_count(self, date, number=1): - """ - Increment the user request count in the database - """ - current_requests_count = self.supabase_db.get_user_requests_count_for_month( - self.id, date - ) - - daily_requests_count = self.supabase_db.get_user_requests_count_for_day( - self.id, date - ) - - # BUG: could be a bug, we are assuming that 0 means no records ! 
- if daily_requests_count == 0: - logger.info("Request count is 0, creating new record") - if self.email is None: - raise ValueError("User Email should be defined for daily usage table") - self.supabase_db.create_user_daily_usage( - user_id=self.id, date=date, user_email=self.email, number=number - ) - self.daily_requests_count = number - return - - self.supabase_db.increment_user_request_count( - user_id=self.id, - date=date, - number=daily_requests_count + number, - ) - - self.daily_requests_count = current_requests_count + number - - logger.info( - f"User {self.email} request count updated to {self.daily_requests_count}" - ) From 4d7f87ea4df8749587a4d92caf2234802dd026a6 Mon Sep 17 00:00:00 2001 From: aminediro Date: Mon, 1 Jul 2024 11:02:08 +0200 Subject: [PATCH 12/20] updates user service and repo --- .../user/controller/user_controller.py | 36 +++++++++++-------- .../api/modules/user/repository/__init__.py | 1 - .../{users.py => user_repository.py} | 10 +++--- .../api/modules/user/service/user_service.py | 23 ++++++++++-- 4 files changed, 48 insertions(+), 22 deletions(-) rename backend/core/quivr_core/api/modules/user/repository/{users.py => user_repository.py} (95%) diff --git a/backend/core/quivr_core/api/modules/user/controller/user_controller.py b/backend/core/quivr_core/api/modules/user/controller/user_controller.py index 53eec7766d60..850aafa4ee99 100644 --- a/backend/core/quivr_core/api/modules/user/controller/user_controller.py +++ b/backend/core/quivr_core/api/modules/user/controller/user_controller.py @@ -1,18 +1,25 @@ -from fastapi import APIRouter, Depends, Request +from typing import Annotated + +from fastapi import APIRouter, Depends + +from quivr_core.api.dependencies import get_service from quivr_core.api.modules.brain.service.brain_user_service import BrainUserService from quivr_core.api.modules.dependencies import get_current_user from quivr_core.api.modules.user.dto.inputs import UserUpdatableProperties from quivr_core.api.modules.user.entity.user_identity import UserIdentity -from quivr_core.api.modules.user.repository.users import UserRepository +from quivr_core.api.modules.user.service.user_service import UserService user_router = APIRouter() brain_user_service = BrainUserService() -user_repository = UserRepository() + +UserServiceDep = Annotated[UserService, Depends(get_service(UserService))] + +UserIdentityDep = Annotated[UserIdentity, Depends(get_current_user)] @user_router.get("/user", tags=["User"]) async def get_user_endpoint( - request: Request, current_user: UserIdentity = Depends(get_current_user) + current_user: UserIdentityDep, user_service: UserServiceDep ): """ Get user information and statistics. @@ -26,16 +33,13 @@ async def get_user_endpoint( information about the user's API usage. """ - user_settings = user_daily_usage.get_user_settings() + user_settings = user_service.get_user_settings(current_user.id) return { "email": current_user.email, - "max_brain_size": max_brain_size, "current_brain_size": 0, - "monthly_chat_credit": monthly_chat_credit, "models": user_settings.get("models", []), "id": current_user.id, - "is_premium": user_settings["is_premium"], } @@ -45,12 +49,13 @@ async def get_user_endpoint( ) def update_user_identity_route( user_identity_updatable_properties: UserUpdatableProperties, - current_user: UserIdentity = Depends(get_current_user), + current_user: UserIdentityDep, + user_service: UserServiceDep, ) -> UserIdentity: """ Update user identity. 
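`get_service(UserService)` above acts as a dependency factory. Its implementation is also outside this diff; a plausible sketch under the base-class shape assumed earlier, with a hypothetical `get_async_session` provider standing in for the real session dependency:

    from typing import Annotated, Callable, Generic, Type, TypeVar

    from fastapi import Depends

    class BaseRepository:
        def __init__(self, session):
            self.session = session

    R = TypeVar("R", bound=BaseRepository)

    class BaseService(Generic[R]):
        repository_cls: Type[R]

        def __init__(self, repository: R):
            self.repository = repository

    class UserRepository(BaseRepository): ...

    class UserService(BaseService[UserRepository]):
        repository_cls = UserRepository

    async def get_async_session():  # hypothetical session provider
        return object()

    def get_service(service_cls: Type[BaseService]) -> Callable:
        # Build a per-request dependency that resolves a session, wraps it
        # in the service's declared repository, and returns the service.
        def _factory(session=Depends(get_async_session)):
            return service_cls(service_cls.repository_cls(session))
        return _factory

    UserServiceDep = Annotated[UserService, Depends(get_service(UserService))]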
""" - return user_repository.update_user_properties( + return user_service.update_user_properties( current_user.id, user_identity_updatable_properties ) @@ -60,12 +65,13 @@ def update_user_identity_route( tags=["User"], ) def get_user_identity_route( - current_user: UserIdentity = Depends(get_current_user), + current_user: UserIdentityDep, + user_service: UserServiceDep, ) -> UserIdentity: """ Get user identity. """ - return user_repository.get_user_identity(current_user.id) + return user_service.get_user_identity(current_user.id) @user_router.delete( @@ -73,16 +79,16 @@ def get_user_identity_route( tags=["User"], ) async def delete_user_data_route( - current_user: UserIdentity = Depends(get_current_user), + current_user: UserIdentityDep, + user_service: UserServiceDep, ): """ Delete a user. - - `user_id`: The ID of the user to delete. This endpoint deletes a user from the system. """ - user_repository.delete_user_data(current_user.id) + user_service.delete_user_data(current_user.id) return {"message": "User deleted successfully"} diff --git a/backend/core/quivr_core/api/modules/user/repository/__init__.py b/backend/core/quivr_core/api/modules/user/repository/__init__.py index 699c705f8285..e69de29bb2d1 100644 --- a/backend/core/quivr_core/api/modules/user/repository/__init__.py +++ b/backend/core/quivr_core/api/modules/user/repository/__init__.py @@ -1 +0,0 @@ -from .users import UserRepository diff --git a/backend/core/quivr_core/api/modules/user/repository/users.py b/backend/core/quivr_core/api/modules/user/repository/user_repository.py similarity index 95% rename from backend/core/quivr_core/api/modules/user/repository/users.py rename to backend/core/quivr_core/api/modules/user/repository/user_repository.py index 0ac72eb942f0..099d9831c723 100644 --- a/backend/core/quivr_core/api/modules/user/repository/users.py +++ b/backend/core/quivr_core/api/modules/user/repository/user_repository.py @@ -1,9 +1,11 @@ from uuid import UUID +from sqlmodel.ext.asyncio.session import AsyncSession + from quivr_core.api.models.settings import get_supabase_client from quivr_core.api.modules.dependencies import BaseRepository +from quivr_core.api.modules.user.dto.inputs import UserUpdatableProperties from quivr_core.api.modules.user.entity.user_identity import UserIdentity -from sqlmodel.ext.asyncio.session import AsyncSession class UserRepository(BaseRepository): @@ -26,8 +28,8 @@ def create_user_identity(self, id): def update_user_properties( self, - user_id, - user_identity_updatable_properties, + user_id: UUID, + user_identity_updatable_properties: UserUpdatableProperties, ): response = ( self.db.from_("user_identity") @@ -110,7 +112,7 @@ def delete_user_data(self, user_id: UUID): ).execute() self.db.table("users").delete().filter("id", "eq", str(user_id)).execute() - def get_user_settings(self, user_id): + def get_user_settings(self, user_id: UUID): """ Fetch the user settings from the database """ diff --git a/backend/core/quivr_core/api/modules/user/service/user_service.py b/backend/core/quivr_core/api/modules/user/service/user_service.py index 8f83aae3cb19..676a24d97124 100644 --- a/backend/core/quivr_core/api/modules/user/service/user_service.py +++ b/backend/core/quivr_core/api/modules/user/service/user_service.py @@ -1,7 +1,8 @@ from uuid import UUID -from quivr_api.modules.dependencies import BaseService -from quivr_core.api.modules.user.repository.users import UserRepository +from quivr_core.api.modules.dependencies import BaseService +from quivr_core.api.modules.user.dto.inputs import 
UserUpdatableProperties +from quivr_core.api.modules.user.repository.user_repository import UserRepository class UserService(BaseService[UserRepository]): @@ -15,3 +16,21 @@ def get_user_id_by_email(self, email: str) -> UUID | None: def get_user_email_by_user_id(self, user_id: UUID) -> str | None: return self.repository.get_user_email_by_user_id(user_id) + + def get_user_settings(self, user_id: UUID): + return self.repository.get_user_settings(user_id) + + def get_user_identity(self, user_id: UUID): + return self.repository.get_user_identity(user_id) + + def update_user_properties( + self, + user_id: UUID, + user_identity_updatable_properties: UserUpdatableProperties, + ): + return self.repository.update_user_properties( + user_id, user_identity_updatable_properties + ) + + def delete_user_data(self, user_id: UUID): + return self.repository.delete_user_data(user_id) From 629d359382e33d0bddfc49a6fc583a85c4460494 Mon Sep 17 00:00:00 2001 From: aminediro Date: Mon, 1 Jul 2024 15:16:53 +0200 Subject: [PATCH 13/20] prompt service --- .../modules/brain/controller/brain_routes.py | 7 ++- .../modules/brain/repository/brains_users.py | 4 +- .../modules/chat/controller/chat_routes.py | 7 ++- .../api/modules/chat/service/chat_service.py | 4 +- .../api/modules/chat/service/utils.py | 9 --- .../prompt/controller/prompt_routes.py | 34 +++++++---- .../api/modules/prompt/entity/__init__.py | 7 --- .../api/modules/prompt/repository/prompts.py | 16 ++---- .../prompt/repository/prompts_interface.py | 57 ------------------- .../modules/prompt/service/prompt_service.py | 11 ++-- 10 files changed, 48 insertions(+), 108 deletions(-) delete mode 100644 backend/core/quivr_core/api/modules/prompt/repository/prompts_interface.py diff --git a/backend/core/quivr_core/api/modules/brain/controller/brain_routes.py b/backend/core/quivr_core/api/modules/brain/controller/brain_routes.py index 0143fe9c2a89..396b3d7c76f6 100644 --- a/backend/core/quivr_core/api/modules/brain/controller/brain_routes.py +++ b/backend/core/quivr_core/api/modules/brain/controller/brain_routes.py @@ -1,3 +1,4 @@ +from typing import Annotated from uuid import UUID from fastapi import APIRouter, Depends, HTTPException, Request @@ -20,7 +21,7 @@ from quivr_core.api.modules.brain.service.integration_brain_service import ( IntegrationBrainDescriptionService, ) -from quivr_core.api.modules.dependencies import get_current_user +from quivr_core.api.modules.dependencies import get_current_user, get_service from quivr_core.api.modules.prompt.service.prompt_service import PromptService from quivr_core.api.modules.user.entity.user_identity import UserIdentity from quivr_core.api.packages.utils.telemetry import maybe_send_telemetry @@ -28,11 +29,12 @@ logger = get_logger(__name__) brain_router = APIRouter() -prompt_service = PromptService() brain_service = BrainService() brain_user_service = BrainUserService() integration_brain_description_service = IntegrationBrainDescriptionService() +PromptServiceDep = Annotated[PromptService, Depends(get_service(PromptService))] + @brain_router.get( "/brains/integrations/", @@ -103,6 +105,7 @@ async def create_new_brain( async def update_existing_brain( brain_id: UUID, brain_update_data: BrainUpdatableProperties, + prompt_service: PromptServiceDep, current_user: UserIdentity = Depends(get_current_user), ): """Update an existing brain's configuration.""" diff --git a/backend/core/quivr_core/api/modules/brain/repository/brains_users.py b/backend/core/quivr_core/api/modules/brain/repository/brains_users.py index 
a420e999bdf8..29be9cfeced8 100644 --- a/backend/core/quivr_core/api/modules/brain/repository/brains_users.py +++ b/backend/core/quivr_core/api/modules/brain/repository/brains_users.py @@ -147,8 +147,8 @@ def get_user_default_brain_id(self, user_id: UUID) -> UUID | None: response = ( self.db.from_("brains_users") .select("brain_id") - .filter("user_id", "eq", user_id) - .filter("default_brain", "eq", True) + .filter("user_id", "eq", str(user_id)) + .filter("default_brain", "eq", "True") .execute() ).data if len(response) == 0: diff --git a/backend/core/quivr_core/api/modules/chat/controller/chat_routes.py b/backend/core/quivr_core/api/modules/chat/controller/chat_routes.py index 7c57d9da6514..8c0fba1705af 100644 --- a/backend/core/quivr_core/api/modules/chat/controller/chat_routes.py +++ b/backend/core/quivr_core/api/modules/chat/controller/chat_routes.py @@ -18,19 +18,20 @@ from quivr_core.api.modules.dependencies import get_current_user, get_service from quivr_core.api.modules.knowledge.repository.knowledges import KnowledgeRepository from quivr_core.api.modules.prompt.service.prompt_service import PromptService +from quivr_core.api.modules.rag.rag_service import RAGService from quivr_core.api.modules.user.entity.user_identity import UserIdentity -from quivr_core.api.packages.quivr_core.rag_service import RAGService from quivr_core.api.packages.utils.telemetry import maybe_send_telemetry logger = get_logger(__name__) brain_service = BrainService() knowledge_service = KnowledgeRepository() -prompt_service = PromptService() ChatServiceDep = Annotated[ChatService, Depends(get_service(ChatService))] UserIdentityDep = Annotated[UserIdentity, Depends(get_current_user)] +PromptServiceDep = Annotated[PromptService, Depends(get_service(PromptService))] + chat_router = APIRouter() @@ -136,6 +137,7 @@ async def create_question_handler( chat_id: UUID, current_user: UserIdentityDep, chat_service: ChatServiceDep, + prompt_service: PromptServiceDep, brain_id: Annotated[UUID | None, Query()] = None, ): try: @@ -173,6 +175,7 @@ async def create_stream_question_handler( chat_id: UUID, chat_service: ChatServiceDep, current_user: UserIdentityDep, + prompt_service: PromptServiceDep, brain_id: Annotated[UUID | None, Query()] = None, ) -> StreamingResponse: logger.info( diff --git a/backend/core/quivr_core/api/modules/chat/service/chat_service.py b/backend/core/quivr_core/api/modules/chat/service/chat_service.py index b533be9344f6..133161430e31 100644 --- a/backend/core/quivr_core/api/modules/chat/service/chat_service.py +++ b/backend/core/quivr_core/api/modules/chat/service/chat_service.py @@ -26,11 +26,9 @@ NotificationService, ) from quivr_core.api.modules.prompt.entity.prompt import Prompt -from quivr_core.api.modules.prompt.service.prompt_service import PromptService logger = get_logger(__name__) -prompt_service = PromptService() brain_service = BrainService() notification_service = NotificationService() @@ -56,7 +54,7 @@ async def create_chat( def get_follow_up_question( self, brain_id: UUID = None, question: str = None - ) -> [str]: + ) -> list[str]: follow_up = [ "Summarize the conversation", "Explain in more detail", diff --git a/backend/core/quivr_core/api/modules/chat/service/utils.py b/backend/core/quivr_core/api/modules/chat/service/utils.py index 9191d64d361f..10a629d096c1 100644 --- a/backend/core/quivr_core/api/modules/chat/service/utils.py +++ b/backend/core/quivr_core/api/modules/chat/service/utils.py @@ -1,21 +1,12 @@ from typing import List from quivr_core.api.logger import get_logger -from 
quivr_core.api.modules.brain.service.brain_service import BrainService from quivr_core.api.modules.chat.dto.chats import ChatItem, ChatItemType from quivr_core.api.modules.chat.dto.outputs import GetChatHistoryOutput from quivr_core.api.modules.notification.entity.notification import Notification -from quivr_core.api.modules.notification.service.notification_service import ( - NotificationService, -) -from quivr_core.api.modules.prompt.service.prompt_service import PromptService logger = get_logger(__name__) -prompt_service = PromptService() -brain_service = BrainService() -notification_service = NotificationService() - def merge_chat_history_and_notifications( chat_history: List[GetChatHistoryOutput], notifications: List[Notification] diff --git a/backend/core/quivr_core/api/modules/prompt/controller/prompt_routes.py b/backend/core/quivr_core/api/modules/prompt/controller/prompt_routes.py index 0415be6a0e3e..f87757c25c7d 100644 --- a/backend/core/quivr_core/api/modules/prompt/controller/prompt_routes.py +++ b/backend/core/quivr_core/api/modules/prompt/controller/prompt_routes.py @@ -1,7 +1,8 @@ +from typing import Annotated from uuid import UUID -from fastapi import APIRouter - +from fastapi import APIRouter, Depends +from quivr_core.api.modules.dependencies import get_service from quivr_core.api.modules.prompt.entity.prompt import ( CreatePromptProperties, Prompt, @@ -11,41 +12,52 @@ prompt_router = APIRouter() -promptService = PromptService() + +PromptServiceDep = Annotated[PromptService, Depends(get_service(PromptService))] @prompt_router.get("/prompts", tags=["Prompt"]) -async def get_prompts() -> list[Prompt]: +async def get_prompts( + prompt_service: PromptServiceDep, +) -> list[Prompt]: """ Retrieve all public prompt """ - return promptService.get_public_prompts() + return prompt_service.get_public_prompts() @prompt_router.get("/prompts/{prompt_id}", tags=["Prompt"]) -async def get_prompt(prompt_id: UUID) -> Prompt | None: +async def get_prompt( + prompt_id: UUID, + prompt_service: PromptServiceDep, +) -> Prompt | None: """ Retrieve a prompt by its id """ - return promptService.get_prompt_by_id(prompt_id) + return prompt_service.get_prompt_by_id(prompt_id) @prompt_router.put("/prompts/{prompt_id}", tags=["Prompt"]) async def update_prompt( - prompt_id: UUID, prompt: PromptUpdatableProperties + prompt_id: UUID, + prompt: PromptUpdatableProperties, + prompt_service: PromptServiceDep, ) -> Prompt | None: """ Update a prompt by its id """ - return promptService.update_prompt_by_id(prompt_id, prompt) + return prompt_service.update_prompt_by_id(prompt_id, prompt) @prompt_router.post("/prompts", tags=["Prompt"]) -async def create_prompt_route(prompt: CreatePromptProperties) -> Prompt | None: +async def create_prompt_route( + prompt: CreatePromptProperties, + prompt_service: PromptServiceDep, +) -> Prompt | None: """ Create a prompt by its id """ - return promptService.create_prompt(prompt) + return prompt_service.create_prompt(prompt) diff --git a/backend/core/quivr_core/api/modules/prompt/entity/__init__.py b/backend/core/quivr_core/api/modules/prompt/entity/__init__.py index 324aeee09ffc..e69de29bb2d1 100644 --- a/backend/core/quivr_core/api/modules/prompt/entity/__init__.py +++ b/backend/core/quivr_core/api/modules/prompt/entity/__init__.py @@ -1,7 +0,0 @@ -from .prompt import ( - CreatePromptProperties, - DeletePromptResponse, - Prompt, - PromptStatusEnum, - PromptUpdatableProperties, -) diff --git a/backend/core/quivr_core/api/modules/prompt/repository/prompts.py 
b/backend/core/quivr_core/api/modules/prompt/repository/prompts.py index c04d94aa10f5..bffde769dfb2 100644 --- a/backend/core/quivr_core/api/modules/prompt/repository/prompts.py +++ b/backend/core/quivr_core/api/modules/prompt/repository/prompts.py @@ -1,19 +1,15 @@ from uuid import UUID from fastapi import HTTPException - from quivr_core.api.models.settings import get_supabase_client -from quivr_core.api.modules.prompt.entity.prompt import Prompt -from quivr_core.api.modules.prompt.repository.prompts_interface import ( - DeletePromptResponse, - PromptsInterface, -) +from quivr_core.api.modules.dependencies import BaseRepository +from quivr_core.api.modules.prompt.entity.prompt import DeletePromptResponse, Prompt -class Prompts(PromptsInterface): - def __init__(self): - supabase_client = get_supabase_client() - self.db = supabase_client +class PromptRepository(BaseRepository): + def __init__(self, session): + super().__init__(session) + self.db = get_supabase_client() def create_prompt(self, prompt): """ diff --git a/backend/core/quivr_core/api/modules/prompt/repository/prompts_interface.py b/backend/core/quivr_core/api/modules/prompt/repository/prompts_interface.py deleted file mode 100644 index 9a838cc3a515..000000000000 --- a/backend/core/quivr_core/api/modules/prompt/repository/prompts_interface.py +++ /dev/null @@ -1,57 +0,0 @@ -from abc import ABC, abstractmethod -from uuid import UUID - -from quivr_core.api.modules.prompt.entity import ( - CreatePromptProperties, - DeletePromptResponse, - Prompt, - PromptUpdatableProperties, -) - - -class PromptsInterface(ABC): - @abstractmethod - def create_prompt(self, prompt: CreatePromptProperties) -> Prompt: - """ - Create a prompt - """ - pass - - @abstractmethod - def delete_prompt_by_id(self, prompt_id: UUID) -> DeletePromptResponse: - """ - Delete a prompt by id - Args: - prompt_id (UUID): The id of the prompt - - Returns: - A dictionary containing the status of the delete and prompt_id of the deleted prompt - """ - pass - - @abstractmethod - def get_prompt_by_id(self, prompt_id: UUID) -> Prompt | None: - """ - Get a prompt by its id - - Args: - prompt_id (UUID): The id of the prompt - - Returns: - Prompt: The prompt - """ - pass - - @abstractmethod - def get_public_prompts(self) -> list[Prompt]: - """ - List all public prompts - """ - pass - - @abstractmethod - def update_prompt_by_id( - self, prompt_id: UUID, prompt: PromptUpdatableProperties - ) -> Prompt: - """Update a prompt by id""" - pass diff --git a/backend/core/quivr_core/api/modules/prompt/service/prompt_service.py b/backend/core/quivr_core/api/modules/prompt/service/prompt_service.py index 24ea0b3ee08a..ba6f733de461 100644 --- a/backend/core/quivr_core/api/modules/prompt/service/prompt_service.py +++ b/backend/core/quivr_core/api/modules/prompt/service/prompt_service.py @@ -4,20 +4,21 @@ from quivr_core.api.modules.brain.service.utils.get_prompt_to_use_id import ( get_prompt_to_use_id, ) +from quivr_core.api.modules.dependencies import BaseService from quivr_core.api.modules.prompt.entity.prompt import ( CreatePromptProperties, DeletePromptResponse, Prompt, PromptUpdatableProperties, ) -from quivr_core.api.modules.prompt.repository.prompts import Prompts +from quivr_core.api.modules.prompt.repository.prompts import PromptRepository -class PromptService: - repository: Prompts +class PromptService(BaseService[PromptRepository]): + repository: PromptRepository - def __init__(self): - self.repository = Prompts() + def __init__(self, repository: PromptRepository): + self.repository = 
repository def create_prompt(self, prompt: CreatePromptProperties) -> Prompt: return self.repository.create_prompt(prompt) From 49cf7e1e56fb1ae1f9fdb10f6f9cd3b09452bf26 Mon Sep 17 00:00:00 2001 From: aminediro Date: Mon, 1 Jul 2024 15:23:52 +0200 Subject: [PATCH 14/20] renamed to quivr --- .../api/packages/embeddings/vectors.py | 4 +- .../api/packages/files/crawl/crawler.py | 2 +- .../api/packages/files/parsers/audio.py | 4 +- .../api/packages/files/parsers/bibtex.py | 2 +- .../api/packages/files/parsers/code_python.py | 2 +- .../api/packages/files/parsers/common.py | 8 +- .../api/packages/files/parsers/csv.py | 2 +- .../api/packages/files/parsers/docx.py | 2 +- .../api/packages/files/parsers/epub.py | 2 +- .../api/packages/files/parsers/github.py | 2 +- .../api/packages/files/parsers/html.py | 2 +- .../api/packages/files/parsers/markdown.py | 2 +- .../api/packages/files/parsers/notebook.py | 2 +- .../api/packages/files/parsers/odt.py | 2 +- .../api/packages/files/parsers/pdf.py | 2 +- .../api/packages/files/parsers/powerpoint.py | 2 +- .../api/packages/files/parsers/telegram.py | 2 +- .../api/packages/files/parsers/txt.py | 2 +- .../api/packages/files/parsers/xlsx.py | 2 +- .../api/packages/files/processors.py | 4 +- .../api/packages/quivr_core/quivr_rag.py | 17 +- .../api/packages/quivr_core/rag_factory.py | 20 +- .../api/packages/quivr_core/rag_service.py | 296 ------------------ .../api/packages/quivr_core/utils.py | 8 +- .../utils/handle_request_validation_error.py | 2 +- .../api/packages/utils/telemetry.py | 2 +- 26 files changed, 47 insertions(+), 350 deletions(-) delete mode 100644 backend/core/quivr_core/api/packages/quivr_core/rag_service.py diff --git a/backend/core/quivr_core/api/packages/embeddings/vectors.py b/backend/core/quivr_core/api/packages/embeddings/vectors.py index 807b174884cc..7a6ee2a8e681 100644 --- a/backend/core/quivr_core/api/packages/embeddings/vectors.py +++ b/backend/core/quivr_core/api/packages/embeddings/vectors.py @@ -4,8 +4,8 @@ from pydantic import BaseModel -from quivr_core.logger import get_logger -from quivr_core.models.settings import ( +from quivr_core.api.logger import get_logger +from quivr_core.api.models.settings import ( get_documents_vector_store, get_embedding_client, get_supabase_db, diff --git a/backend/core/quivr_core/api/packages/files/crawl/crawler.py b/backend/core/quivr_core/api/packages/files/crawl/crawler.py index b3a9bbd190fe..945968b755e9 100644 --- a/backend/core/quivr_core/api/packages/files/crawl/crawler.py +++ b/backend/core/quivr_core/api/packages/files/crawl/crawler.py @@ -5,7 +5,7 @@ from langchain_community.document_loaders import PlaywrightURLLoader from pydantic import BaseModel -from quivr_core.logger import get_logger +from quivr_core.api.logger import get_logger logger = get_logger(__name__) diff --git a/backend/core/quivr_core/api/packages/files/parsers/audio.py b/backend/core/quivr_core/api/packages/files/parsers/audio.py index 8c36d17102ab..c090bba045a0 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/audio.py +++ b/backend/core/quivr_core/api/packages/files/parsers/audio.py @@ -4,8 +4,8 @@ from langchain.schema import Document from langchain.text_splitter import RecursiveCharacterTextSplitter -from quivr_core.models.files import File -from quivr_core.models.settings import get_documents_vector_store +from quivr_core.api.models.files import File +from quivr_core.api.models.settings import get_documents_vector_store from quivr_core.packages.files.file import compute_sha1_from_content diff --git 
a/backend/core/quivr_core/api/packages/files/parsers/bibtex.py b/backend/core/quivr_core/api/packages/files/parsers/bibtex.py index fde358593d8c..960bc771d905 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/bibtex.py +++ b/backend/core/quivr_core/api/packages/files/parsers/bibtex.py @@ -1,6 +1,6 @@ from langchain_community.document_loaders import BibtexLoader -from quivr_core.models.files import File +from quivr_core.api.models.files import File from .common import process_file diff --git a/backend/core/quivr_core/api/packages/files/parsers/code_python.py b/backend/core/quivr_core/api/packages/files/parsers/code_python.py index 2222be417563..3e7146be8db5 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/code_python.py +++ b/backend/core/quivr_core/api/packages/files/parsers/code_python.py @@ -1,6 +1,6 @@ from langchain_community.document_loaders import PythonLoader -from quivr_core.models.files import File +from quivr_core.api.models.files import File from .common import process_file diff --git a/backend/core/quivr_core/api/packages/files/parsers/common.py b/backend/core/quivr_core/api/packages/files/parsers/common.py index 6d6c8a1c6536..5ffc49ad1e7c 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/common.py +++ b/backend/core/quivr_core/api/packages/files/parsers/common.py @@ -10,10 +10,10 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter from llama_parse import LlamaParse -from quivr_core.logger import get_logger -from quivr_core.models.files import File -from quivr_core.modules.brain.service.brain_vector_service import BrainVectorService -from quivr_core.modules.upload.service.upload_file import DocumentSerializable +from quivr_core.api.logger import get_logger +from quivr_core.api.models.files import File +from quivr_core.api.modules.brain.service.brain_vector_service import BrainVectorService +from quivr_core.api.modules.upload.service.upload_file import DocumentSerializable from quivr_core.packages.embeddings.vectors import Neurons if not isinstance(asyncio.get_event_loop(), uvloop.Loop): diff --git a/backend/core/quivr_core/api/packages/files/parsers/csv.py b/backend/core/quivr_core/api/packages/files/parsers/csv.py index c3d8336482cc..fc440bf268bc 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/csv.py +++ b/backend/core/quivr_core/api/packages/files/parsers/csv.py @@ -1,6 +1,6 @@ from langchain_community.document_loaders import CSVLoader -from quivr_core.models.files import File +from quivr_core.api.models.files import File from .common import process_file diff --git a/backend/core/quivr_core/api/packages/files/parsers/docx.py b/backend/core/quivr_core/api/packages/files/parsers/docx.py index d9b0fb2afb82..1766d63cc4e0 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/docx.py +++ b/backend/core/quivr_core/api/packages/files/parsers/docx.py @@ -1,6 +1,6 @@ from langchain_community.document_loaders import Docx2txtLoader -from quivr_core.models.files import File +from quivr_core.api.models.files import File from .common import process_file diff --git a/backend/core/quivr_core/api/packages/files/parsers/epub.py b/backend/core/quivr_core/api/packages/files/parsers/epub.py index 5b3393e16187..c57374fac3f4 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/epub.py +++ b/backend/core/quivr_core/api/packages/files/parsers/epub.py @@ -1,6 +1,6 @@ from langchain_community.document_loaders.epub import UnstructuredEPubLoader -from quivr_core.models.files import File +from 
quivr_core.api.models.files import File from .common import process_file diff --git a/backend/core/quivr_core/api/packages/files/parsers/github.py b/backend/core/quivr_core/api/packages/files/parsers/github.py index 66e99d88aa79..f07af638130e 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/github.py +++ b/backend/core/quivr_core/api/packages/files/parsers/github.py @@ -5,7 +5,7 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_community.document_loaders import GitLoader -from quivr_core.models.files import File +from quivr_core.api.models.files import File from quivr_core.packages.embeddings.vectors import Neurons from quivr_core.packages.files.file import compute_sha1_from_content diff --git a/backend/core/quivr_core/api/packages/files/parsers/html.py b/backend/core/quivr_core/api/packages/files/parsers/html.py index 98ce97095713..afe28e0ab104 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/html.py +++ b/backend/core/quivr_core/api/packages/files/parsers/html.py @@ -1,6 +1,6 @@ from langchain_community.document_loaders import UnstructuredHTMLLoader -from quivr_core.models.files import File +from quivr_core.api.models.files import File from .common import process_file diff --git a/backend/core/quivr_core/api/packages/files/parsers/markdown.py b/backend/core/quivr_core/api/packages/files/parsers/markdown.py index d5b016489f7d..44d75b6de37e 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/markdown.py +++ b/backend/core/quivr_core/api/packages/files/parsers/markdown.py @@ -1,6 +1,6 @@ from langchain_community.document_loaders import UnstructuredMarkdownLoader -from quivr_core.models.files import File +from quivr_core.api.models.files import File from .common import process_file diff --git a/backend/core/quivr_core/api/packages/files/parsers/notebook.py b/backend/core/quivr_core/api/packages/files/parsers/notebook.py index 86b283d714f2..16b91ba0a0e2 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/notebook.py +++ b/backend/core/quivr_core/api/packages/files/parsers/notebook.py @@ -1,6 +1,6 @@ from langchain_community.document_loaders import NotebookLoader -from quivr_core.models.files import File +from quivr_core.api.models.files import File from .common import process_file diff --git a/backend/core/quivr_core/api/packages/files/parsers/odt.py b/backend/core/quivr_core/api/packages/files/parsers/odt.py index fdb8a4de61dd..f6b10695a8b8 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/odt.py +++ b/backend/core/quivr_core/api/packages/files/parsers/odt.py @@ -1,6 +1,6 @@ from langchain_community.document_loaders import UnstructuredPDFLoader -from quivr_core.models.files import File +from quivr_core.api.models.files import File from .common import process_file diff --git a/backend/core/quivr_core/api/packages/files/parsers/pdf.py b/backend/core/quivr_core/api/packages/files/parsers/pdf.py index 21418a98d6ae..653f6abf39f0 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/pdf.py +++ b/backend/core/quivr_core/api/packages/files/parsers/pdf.py @@ -1,6 +1,6 @@ from langchain_community.document_loaders import UnstructuredPDFLoader -from quivr_core.models.files import File +from quivr_core.api.models.files import File from .common import process_file diff --git a/backend/core/quivr_core/api/packages/files/parsers/powerpoint.py b/backend/core/quivr_core/api/packages/files/parsers/powerpoint.py index 1b1363326874..57502cb4eda6 100644 --- 
a/backend/core/quivr_core/api/packages/files/parsers/powerpoint.py +++ b/backend/core/quivr_core/api/packages/files/parsers/powerpoint.py @@ -1,6 +1,6 @@ from langchain_community.document_loaders import UnstructuredFileLoader -from quivr_core.models.files import File +from quivr_core.api.models.files import File from .common import process_file diff --git a/backend/core/quivr_core/api/packages/files/parsers/telegram.py b/backend/core/quivr_core/api/packages/files/parsers/telegram.py index 477c0a10bb21..be64655c1f47 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/telegram.py +++ b/backend/core/quivr_core/api/packages/files/parsers/telegram.py @@ -1,4 +1,4 @@ -from quivr_core.models.files import File +from quivr_core.api.models.files import File from quivr_core.packages.files.loaders.telegram import TelegramChatFileLoader from .common import process_file diff --git a/backend/core/quivr_core/api/packages/files/parsers/txt.py b/backend/core/quivr_core/api/packages/files/parsers/txt.py index 172aeea4ec64..46734c768503 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/txt.py +++ b/backend/core/quivr_core/api/packages/files/parsers/txt.py @@ -1,6 +1,6 @@ from langchain_community.document_loaders import TextLoader -from quivr_core.models.files import File +from quivr_core.api.models.files import File from .common import process_file diff --git a/backend/core/quivr_core/api/packages/files/parsers/xlsx.py b/backend/core/quivr_core/api/packages/files/parsers/xlsx.py index c0069ced31eb..53005f7b4ad6 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/xlsx.py +++ b/backend/core/quivr_core/api/packages/files/parsers/xlsx.py @@ -1,6 +1,6 @@ from langchain_community.document_loaders import UnstructuredExcelLoader -from quivr_core.models.files import File +from quivr_core.api.models.files import File from .common import process_file diff --git a/backend/core/quivr_core/api/packages/files/processors.py b/backend/core/quivr_core/api/packages/files/processors.py index d4922317c51e..cc7c777dee0a 100644 --- a/backend/core/quivr_core/api/packages/files/processors.py +++ b/backend/core/quivr_core/api/packages/files/processors.py @@ -1,4 +1,4 @@ -from quivr_core.modules.brain.service.brain_service import BrainService +from quivr_core.api.modules.brain.service.brain_service import BrainService from .parsers.audio import process_audio from .parsers.bibtex import process_bibtex @@ -50,7 +50,7 @@ def create_response(message, type): brain_service = BrainService() -# TODO: Move filter_file to a file service to avoid circular imports from quivr_core.models/files.py for File class +# TODO: Move filter_file to a file service to avoid circular imports from quivr_core.api.models/files.py for File class def filter_file( file, brain_id, diff --git a/backend/core/quivr_core/api/packages/quivr_core/quivr_rag.py b/backend/core/quivr_core/api/packages/quivr_core/quivr_rag.py index 77e2e9e5daaa..c61e1efe5a87 100644 --- a/backend/core/quivr_core/api/packages/quivr_core/quivr_rag.py +++ b/backend/core/quivr_core/api/packages/quivr_core/quivr_rag.py @@ -14,18 +14,11 @@ from langchain_core.vectorstores import VectorStore from langchain_openai import ChatOpenAI -from quivr_core.modules.knowledge.entity.knowledge import Knowledge -from quivr_core.packages.quivr_core.config import RAGConfig -from quivr_core.packages.quivr_core.models import ( - ParsedRAGChunkResponse, - ParsedRAGResponse, - cited_answer, -) -from quivr_core.packages.quivr_core.prompts import ( - ANSWER_PROMPT, - 
CONDENSE_QUESTION_PROMPT, -) -from quivr_core.packages.quivr_core.utils import ( +from quivr_core.api.modules.knowledge.entity.knowledge import Knowledge +from quivr_core.config import RAGConfig +from quivr_core.models import ParsedRAGChunkResponse, ParsedRAGResponse, cited_answer +from quivr_core.prompts import ANSWER_PROMPT, CONDENSE_QUESTION_PROMPT +from quivr_core.utils import ( combine_documents, format_file_list, get_chunk_metadata, diff --git a/backend/core/quivr_core/api/packages/quivr_core/rag_factory.py b/backend/core/quivr_core/api/packages/quivr_core/rag_factory.py index 49d998521304..7e88652e8322 100644 --- a/backend/core/quivr_core/api/packages/quivr_core/rag_factory.py +++ b/backend/core/quivr_core/api/packages/quivr_core/rag_factory.py @@ -1,17 +1,17 @@ from typing import Type -from quivr_core.logger import get_logger -from quivr_core.modules.brain.entity.integration_brain import IntegrationEntity -from quivr_core.modules.brain.integrations.Big.Brain import BigBrain -from quivr_core.modules.brain.integrations.GPT4.Brain import GPT4Brain -from quivr_core.modules.brain.integrations.Multi_Contract.Brain import ( +from quivr_core.api.logger import get_logger +from quivr_core.api.modules.brain.entity.integration_brain import IntegrationEntity +from quivr_core.api.modules.brain.integrations.Big.Brain import BigBrain +from quivr_core.api.modules.brain.integrations.GPT4.Brain import GPT4Brain +from quivr_core.api.modules.brain.integrations.Multi_Contract.Brain import ( MultiContractBrain, ) -from quivr_core.modules.brain.integrations.Notion.Brain import NotionBrain -from quivr_core.modules.brain.integrations.Proxy.Brain import ProxyBrain -from quivr_core.modules.brain.integrations.Self.Brain import SelfBrain -from quivr_core.modules.brain.integrations.SQL.Brain import SQLBrain -from quivr_core.modules.brain.knowledge_brain_qa import KnowledgeBrainQA +from quivr_core.api.modules.brain.integrations.Notion.Brain import NotionBrain +from quivr_core.api.modules.brain.integrations.Proxy.Brain import ProxyBrain +from quivr_core.api.modules.brain.integrations.Self.Brain import SelfBrain +from quivr_core.api.modules.brain.integrations.SQL.Brain import SQLBrain +from quivr_core.api.modules.brain.knowledge_brain_qa import KnowledgeBrainQA logger = get_logger(__name__) diff --git a/backend/core/quivr_core/api/packages/quivr_core/rag_service.py b/backend/core/quivr_core/api/packages/quivr_core/rag_service.py deleted file mode 100644 index 10cbfd0b65e8..000000000000 --- a/backend/core/quivr_core/api/packages/quivr_core/rag_service.py +++ /dev/null @@ -1,296 +0,0 @@ -import datetime -from uuid import UUID, uuid4 - -from langchain_community.chat_models import ChatLiteLLM - -from quivr_core.logger import get_logger -from quivr_core.models.settings import ( - get_embedding_client, - get_supabase_client, - settings, -) -from quivr_core.modules.brain.entity.brain_entity import BrainEntity -from quivr_core.modules.brain.service.brain_service import BrainService -from quivr_core.modules.brain.service.utils.format_chat_history import ( - format_chat_history, -) -from quivr_core.modules.chat.controller.chat.utils import ( - compute_cost, - find_model_and_generate_metadata, - update_user_usage, -) -from quivr_core.modules.chat.dto.inputs import CreateChatHistory -from quivr_core.modules.chat.dto.outputs import GetChatHistoryOutput -from quivr_core.modules.chat.service.chat_service import ChatService -from quivr_core.modules.knowledge.repository.knowledges import KnowledgeRepository -from 
quivr_core.modules.prompt.entity.prompt import Prompt -from quivr_core.modules.prompt.service.prompt_service import PromptService -from quivr_core.modules.user.entity.user_identity import UserIdentity -from quivr_core.modules.user.service.user_usage import UserUsage -from quivr_core.packages.quivr_core.config import RAGConfig -from quivr_core.packages.quivr_core.models import ParsedRAGResponse, RAGResponseMetadata -from quivr_core.packages.quivr_core.quivr_rag import QuivrQARAG -from quivr_core.packages.quivr_core.utils import generate_source -from quivr_core.vectorstore.supabase import CustomSupabaseVectorStore - -logger = get_logger(__name__) - - -class RAGService: - def __init__( - self, - current_user: UserIdentity, - brain_id: UUID | None, - chat_id: UUID, - brain_service: BrainService, - prompt_service: PromptService, - chat_service: ChatService, - knowledge_service: KnowledgeRepository, - ): - # Services - self.brain_service = brain_service - self.prompt_service = prompt_service - self.chat_service = chat_service - self.knowledge_service = knowledge_service - - # Base models - self.current_user = current_user - self.chat_id = chat_id - self.brain = self.get_or_create_brain(brain_id, self.current_user.id) - self.prompt = self.get_brain_prompt(self.brain) - - # check at init time - self.model_to_use = self.check_and_update_user_usage( - self.current_user, self.brain - ) - - def get_brain_prompt(self, brain: BrainEntity) -> Prompt | None: - return ( - self.prompt_service.get_prompt_by_id(brain.prompt_id) - if brain.prompt_id - else None - ) - - def get_llm(self, rag_config: RAGConfig): - api_base = ( - settings.ollama_api_base_url - if settings.ollama_api_base_url and rag_config.model.startswith("ollama") - else None - ) - return ChatLiteLLM( - temperature=rag_config.temperature, - max_tokens=rag_config.max_tokens, - model=rag_config.model, - streaming=rag_config.streaming, - verbose=False, - api_base=api_base, - ) # pyright: ignore reportPrivateUsage=none - - def get_or_create_brain(self, brain_id: UUID | None, user_id: UUID) -> BrainEntity: - brain = None - if brain_id is not None: - brain = self.brain_service.get_brain_details(brain_id, user_id) - - # TODO: Create if doesn't exist - assert brain - - if brain.integration: - assert brain.integration.user_id == user_id - return brain - - def check_and_update_user_usage(self, user: UserIdentity, brain: BrainEntity): - """Check user limits and raises if user reached his limits: - 1. Raise if one of the conditions : - - User doesn't have access to brains - - Model of brain is not is user_settings.models - - Latest sum_30d(user_daily_user) < user_settings.max_monthly_usage - - Check sum(user_settings.daily_user_count)+ model_price < user_settings.monthly_chat_credits - 2. 
Updates user usage - """ - # TODO(@aminediro) : THIS is bug prone, should retrieve it from DB here - user_usage = UserUsage(id=user.id, email=user.email) - user_settings = user_usage.get_user_settings() - all_models = user_usage.get_models() - - # TODO(@aminediro): refactor this function - model_to_use = find_model_and_generate_metadata( - brain.model, - user_settings, - all_models, - ) - cost = compute_cost(model_to_use, all_models) - # Raises HTTP if user usage exceeds limits - update_user_usage(user_usage, user_settings, cost) # noqa: F821 - return model_to_use - - def create_vector_store( - self, brain_id: UUID, max_input: int - ) -> CustomSupabaseVectorStore: - supabase_client = get_supabase_client() - embeddings = get_embedding_client() - return CustomSupabaseVectorStore( - supabase_client, - embeddings, - table_name="vectors", - brain_id=brain_id, - max_input=max_input, - ) - - def save_answer(self, question: str, answer: ParsedRAGResponse): - return self.chat_service.update_chat_history( - CreateChatHistory( - **{ - "chat_id": self.chat_id, - "user_message": question, - "assistant": answer.answer, - "brain_id": self.brain.brain_id, - # TODO: prompt_id should always be not None - "prompt_id": self.prompt.id if self.prompt else None, - "metadata": answer.metadata.model_dump() if answer.metadata else {}, - } - ) - ) - - async def generate_answer( - self, - question: str, - ): - logger.info( - f"Creating question for chat {self.chat_id} with brain {self.brain.brain_id} " - ) - rag_config = RAGConfig( - model=self.model_to_use.name, - temperature=self.brain.temperature, - max_input=self.model_to_use.max_input, - max_tokens=self.brain.max_tokens, - prompt=self.prompt.content if self.prompt else None, - streaming=False, - ) - history = await self.chat_service.get_chat_history(self.chat_id) - # Get list of files - list_files = self.knowledge_service.get_all_knowledge_in_brain( - self.brain.brain_id - ) - # Build RAG dependencies to inject - vector_store = self.create_vector_store( - self.brain.brain_id, rag_config.max_input - ) - llm = self.get_llm(rag_config) - # Initialize the RAG pipline - rag_pipeline = QuivrQARAG( - rag_config=rag_config, llm=llm, vector_store=vector_store - ) - # Format the history, sanitize the input - transformed_history = format_chat_history(history) - - parsed_response = rag_pipeline.answer(question, transformed_history, list_files) - - # Save the answer to db - new_chat_entry = self.save_answer(question, parsed_response) - - # Format output to be correct - return GetChatHistoryOutput( - **{ - "chat_id": self.chat_id, - "user_message": question, - "assistant": parsed_response.answer, - "message_time": new_chat_entry.message_time, - "prompt_title": (self.prompt.title if self.prompt else None), - "brain_name": self.brain.name if self.brain else None, - "message_id": new_chat_entry.message_id, - "brain_id": str(self.brain.brain_id) if self.brain else None, - "metadata": ( - parsed_response.metadata.model_dump() - if parsed_response.metadata - else {} - ), - } - ) - - async def generate_answer_stream( - self, - question: str, - ): - logger.info( - f"Creating question for chat {self.chat_id} with brain {self.brain.brain_id} " - ) - # Build the rag config - rag_config = RAGConfig( - model=self.model_to_use.name, - temperature=self.brain.temperature, - max_input=self.model_to_use.max_input, - max_tokens=self.brain.max_tokens, - prompt=self.prompt.content if self.prompt else "", - streaming=True, - ) - # Getting chat history - history = await 
self.chat_service.get_chat_history(self.chat_id) - # Format the history, sanitize the input - transformed_history = format_chat_history(history) - - # Get list of files urls - # TODO: Why do we get ALL the files ? - list_files = self.knowledge_service.get_all_knowledge_in_brain( - self.brain.brain_id - ) - llm = self.get_llm(rag_config) - vector_store = self.create_vector_store( - self.brain.brain_id, rag_config.max_input - ) - # Initialize the rag pipline - rag_pipeline = QuivrQARAG( - rag_config=rag_config, llm=llm, vector_store=vector_store - ) - - full_answer = "" - - message_metadata = { - "chat_id": self.chat_id, - "message_id": uuid4(), # do we need it ?, - "user_message": question, # TODO: define result - "message_time": datetime.datetime.now(), # TODO: define result - "prompt_title": (self.prompt.title if self.prompt else ""), - "brain_name": self.brain.name if self.brain else None, - "brain_id": self.brain.brain_id if self.brain else None, - } - - async for response in rag_pipeline.answer_astream( - question, transformed_history, list_files - ): - # Format output to be correct servicedf;j - if not response.last_chunk: - streamed_chat_history = GetChatHistoryOutput( - assistant=response.answer, - metadata=response.metadata.model_dump(), - **message_metadata, - ) - full_answer += response.answer - yield f"data: {streamed_chat_history.model_dump_json()}" - - # For last chunk parse the sources, and the full answer - streamed_chat_history = GetChatHistoryOutput( - assistant=response.answer, - metadata=response.metadata.model_dump(), - **message_metadata, - ) - - sources_urls = generate_source( - response.metadata.sources, - self.brain.brain_id, - ( - streamed_chat_history.metadata["citations"] - if streamed_chat_history.metadata - else None - ), - ) - if streamed_chat_history.metadata: - streamed_chat_history.metadata["sources"] = sources_urls - - self.save_answer( - question, - ParsedRAGResponse( - answer=full_answer, - metadata=RAGResponseMetadata(**streamed_chat_history.metadata), - ), - ) - yield f"data: {streamed_chat_history.model_dump_json()}" diff --git a/backend/core/quivr_core/api/packages/quivr_core/utils.py b/backend/core/quivr_core/api/packages/quivr_core/utils.py index 8bff2ae9bcaa..101b147725a7 100644 --- a/backend/core/quivr_core/api/packages/quivr_core/utils.py +++ b/backend/core/quivr_core/api/packages/quivr_core/utils.py @@ -11,10 +11,10 @@ ) from langchain_core.messages.ai import AIMessageChunk -from quivr_core.modules.chat.dto.chats import Sources -from quivr_core.modules.chat.dto.outputs import GetChatHistoryOutput -from quivr_core.modules.knowledge.entity.knowledge import Knowledge -from quivr_core.modules.upload.service.generate_file_signed_url import ( +from quivr_core.api.modules.chat.dto.chats import Sources +from quivr_core.api.modules.chat.dto.outputs import GetChatHistoryOutput +from quivr_core.api.modules.knowledge.entity.knowledge import Knowledge +from quivr_core.api.modules.upload.service.generate_file_signed_url import ( generate_file_signed_url, ) from quivr_core.packages.quivr_core.models import ( diff --git a/backend/core/quivr_core/api/packages/utils/handle_request_validation_error.py b/backend/core/quivr_core/api/packages/utils/handle_request_validation_error.py index 57e2d0ad06f8..454f9f8ef25b 100644 --- a/backend/core/quivr_core/api/packages/utils/handle_request_validation_error.py +++ b/backend/core/quivr_core/api/packages/utils/handle_request_validation_error.py @@ -2,7 +2,7 @@ from fastapi.exceptions import RequestValidationError from 
fastapi.responses import JSONResponse -from quivr_core.logger import get_logger +from quivr_core.api.logger import get_logger logger = get_logger(__name__) diff --git a/backend/core/quivr_core/api/packages/utils/telemetry.py b/backend/core/quivr_core/api/packages/utils/telemetry.py index a644d646e4f0..c8e45baf3ee8 100644 --- a/backend/core/quivr_core/api/packages/utils/telemetry.py +++ b/backend/core/quivr_core/api/packages/utils/telemetry.py @@ -6,7 +6,7 @@ import httpx from fastapi import Request -from quivr_core.logger import get_logger +from quivr_core.api.logger import get_logger logger = get_logger(__name__) From 1068d469bce6b8bfcae38775b6ec043d52542768 Mon Sep 17 00:00:00 2001 From: aminediro Date: Mon, 1 Jul 2024 15:24:35 +0200 Subject: [PATCH 15/20] renamed models --- .../models/brains_subscription_invitations.py | 14 -- .../api/models/databases/__init__.py | 0 .../api/models/databases/llm_models.py | 13 -- .../api/models/databases/repository.py | 85 ------------ .../api/models/databases/supabase/__init__.py | 8 -- .../brains_subscription_invitations.py | 40 ------ .../api/models/databases/supabase/supabase.py | 21 --- .../models/databases/supabase/user_usage.py | 128 ------------------ .../supabase/files.py => file_repository.py} | 20 +-- backend/core/quivr_core/api/models/files.py | 14 +- .../quivr_core/api/models/files_in_storage.py | 16 --- .../core/quivr_core/api/models/settings.py | 25 +--- .../api/models/sqlalchemy_repository.py | 73 ---------- .../{databases/supabase => }/vectors.py | 14 +- 14 files changed, 30 insertions(+), 441 deletions(-) delete mode 100644 backend/core/quivr_core/api/models/brains_subscription_invitations.py delete mode 100644 backend/core/quivr_core/api/models/databases/__init__.py delete mode 100644 backend/core/quivr_core/api/models/databases/llm_models.py delete mode 100644 backend/core/quivr_core/api/models/databases/repository.py delete mode 100644 backend/core/quivr_core/api/models/databases/supabase/__init__.py delete mode 100644 backend/core/quivr_core/api/models/databases/supabase/brains_subscription_invitations.py delete mode 100644 backend/core/quivr_core/api/models/databases/supabase/supabase.py delete mode 100644 backend/core/quivr_core/api/models/databases/supabase/user_usage.py rename backend/core/quivr_core/api/models/{databases/supabase/files.py => file_repository.py} (50%) delete mode 100644 backend/core/quivr_core/api/models/files_in_storage.py delete mode 100644 backend/core/quivr_core/api/models/sqlalchemy_repository.py rename backend/core/quivr_core/api/models/{databases/supabase => }/vectors.py (88%) diff --git a/backend/core/quivr_core/api/models/brains_subscription_invitations.py b/backend/core/quivr_core/api/models/brains_subscription_invitations.py deleted file mode 100644 index 862610ccd080..000000000000 --- a/backend/core/quivr_core/api/models/brains_subscription_invitations.py +++ /dev/null @@ -1,14 +0,0 @@ -from uuid import UUID - -from pydantic import BaseModel, ConfigDict - -from quivr_core.api.logger import get_logger - -logger = get_logger(__name__) - - -class BrainSubscription(BaseModel): - brain_id: UUID - email: str - rights: str = "Viewer" - model_config = ConfigDict(arbitrary_types_allowed=True) diff --git a/backend/core/quivr_core/api/models/databases/__init__.py b/backend/core/quivr_core/api/models/databases/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/backend/core/quivr_core/api/models/databases/llm_models.py b/backend/core/quivr_core/api/models/databases/llm_models.py 
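# [Editor's aside] The deletion below removes the LLMModel pydantic schema
# together with the Repository ABC; model metadata continues to live in the
# "models" table that get_models() selects from. A hypothetical sketch of the
# kind of validation the schema enabled (parse_models and the fallback-price
# rule are illustrative, not code from this series):
#
#     from pydantic import BaseModel
#
#     class LLMModel(BaseModel):
#         # Mirrors the schema deleted below.
#         name: str = "gpt-3.5-turbo-0125"
#         price: int = 1
#         max_input: int = 512
#         max_output: int = 512
#
#     def parse_models(rows: list[dict]) -> dict[str, LLMModel]:
#         # Validate raw Supabase rows up front instead of failing later.
#         return {m.name: m for m in map(LLMModel.model_validate, rows)}
#
#     def request_cost(model_name: str, models: dict[str, LLMModel]) -> int:
#         # Assumed semantics: unknown models fall back to the default price.
#         return models.get(model_name, LLMModel(name=model_name)).price
#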
deleted file mode 100644 index 00722817ca77..000000000000 --- a/backend/core/quivr_core/api/models/databases/llm_models.py +++ /dev/null @@ -1,13 +0,0 @@ -from pydantic import BaseModel - - -class LLMModel(BaseModel): - """LLM models stored in the database that are allowed to be used by the users. - Args: - BaseModel (BaseModel): Pydantic BaseModel - """ - - name: str = "gpt-3.5-turbo-0125" - price: int = 1 - max_input: int = 512 - max_output: int = 512 diff --git a/backend/core/quivr_core/api/models/databases/repository.py b/backend/core/quivr_core/api/models/databases/repository.py deleted file mode 100644 index 1b19d64ff552..000000000000 --- a/backend/core/quivr_core/api/models/databases/repository.py +++ /dev/null @@ -1,85 +0,0 @@ -from abc import ABC, abstractmethod -from datetime import datetime -from uuid import UUID - -from .llm_models import LLMModel - - -class Repository(ABC): - @abstractmethod - def create_user_daily_usage(self, user_id: UUID, user_email: str, date: datetime): - pass - - @abstractmethod - def get_user_usage(self, user_id: UUID): - pass - - @abstractmethod - def get_models(self) -> LLMModel | None: - pass - - @abstractmethod - def get_user_requests_count_for_month(self, user_id: UUID, date: datetime): - pass - - @abstractmethod - def update_user_request_count(self, user_id: UUID, date: str): - pass - - @abstractmethod - def increment_user_request_count( - self, user_id: UUID, date: str, current_request_count - ): - pass - - @abstractmethod - def set_file_vectors_ids(self, file_sha1: str): - pass - - @abstractmethod - def get_brain_vectors_by_brain_id_and_file_sha1( - self, brain_id: UUID, file_sha1: str - ): - pass - - @abstractmethod - def create_subscription_invitation( - self, brain_id: UUID, user_email: str, rights: str - ): - pass - - @abstractmethod - def update_subscription_invitation( - self, brain_id: UUID, user_email: str, rights: str - ): - pass - - @abstractmethod - def get_subscription_invitations_by_brain_id_and_email( - self, brain_id: UUID, user_email: str - ): - pass - - @abstractmethod - def get_vectors_by_file_name(self, file_name: str): - pass - - @abstractmethod - def similarity_search(self, query_embedding, table: str, k: int, threshold: float): - pass - - @abstractmethod - def update_summary(self, document_id: UUID, summary_id: int): - pass - - @abstractmethod - def get_vectors_by_batch(self, batch_id: UUID): - pass - - @abstractmethod - def get_vectors_in_batch(self, batch_ids): - pass - - @abstractmethod - def get_vectors_by_file_sha1(self, file_sha1): - pass diff --git a/backend/core/quivr_core/api/models/databases/supabase/__init__.py b/backend/core/quivr_core/api/models/databases/supabase/__init__.py deleted file mode 100644 index aa1798c69935..000000000000 --- a/backend/core/quivr_core/api/models/databases/supabase/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from quivr_core.api.models.databases.supabase.brains_subscription_invitations import ( - BrainSubscription, -) -from quivr_core.api.models.databases.supabase.files import File -from quivr_core.api.models.databases.supabase.user_usage import UserUsage -from quivr_core.api.models.databases.supabase.vectors import Vector - -__all__ = ["BrainSubscription", "File", "UserUsage", "Vector"] diff --git a/backend/core/quivr_core/api/models/databases/supabase/brains_subscription_invitations.py b/backend/core/quivr_core/api/models/databases/supabase/brains_subscription_invitations.py deleted file mode 100644 index d2ce6a639c66..000000000000 --- 
a/backend/core/quivr_core/api/models/databases/supabase/brains_subscription_invitations.py +++ /dev/null @@ -1,40 +0,0 @@ -from quivr_core.api.logger import get_logger -from quivr_core.api.models.databases.repository import Repository - -logger = get_logger(__name__) - - -class BrainSubscription(Repository): - def __init__(self, supabase_client): - self.db = supabase_client - - def create_subscription_invitation(self, brain_id, user_email, rights): - logger.info("Creating subscription invitation") - response = ( - self.db.table("brain_subscription_invitations") - .insert({"brain_id": str(brain_id), "email": user_email, "rights": rights}) - .execute() - ) - return response.data - - def update_subscription_invitation(self, brain_id, user_email, rights): - logger.info("Updating subscription invitation") - response = ( - self.db.table("brain_subscription_invitations") - .update({"rights": rights}) - .eq("brain_id", str(brain_id)) - .eq("email", user_email) - .execute() - ) - return response.data - - def get_subscription_invitations_by_brain_id_and_email(self, brain_id, user_email): - response = ( - self.db.table("brain_subscription_invitations") - .select("*") - .eq("brain_id", str(brain_id)) - .eq("email", user_email) - .execute() - ) - - return response diff --git a/backend/core/quivr_core/api/models/databases/supabase/supabase.py b/backend/core/quivr_core/api/models/databases/supabase/supabase.py deleted file mode 100644 index 5c4f40b30e44..000000000000 --- a/backend/core/quivr_core/api/models/databases/supabase/supabase.py +++ /dev/null @@ -1,21 +0,0 @@ -from quivr_core.api.models.databases.supabase import ( - BrainSubscription, - File, - UserUsage, - Vector, -) - - -# TODO: REMOVE THIS CLASS ! -class SupabaseDB( - UserUsage, - File, - BrainSubscription, - Vector, -): - def __init__(self, supabase_client): - self.db = supabase_client - UserUsage.__init__(self, supabase_client) - File.__init__(self, supabase_client) - BrainSubscription.__init__(self, supabase_client) - Vector.__init__(self, supabase_client) diff --git a/backend/core/quivr_core/api/models/databases/supabase/user_usage.py b/backend/core/quivr_core/api/models/databases/supabase/user_usage.py deleted file mode 100644 index c10f834c7752..000000000000 --- a/backend/core/quivr_core/api/models/databases/supabase/user_usage.py +++ /dev/null @@ -1,128 +0,0 @@ -from datetime import datetime, timedelta -from uuid import UUID - -from quivr_core.api.logger import get_logger -from quivr_core.api.models.databases.repository import Repository - -logger = get_logger(__name__) - - -# TODO: change the name of this class because another one already exists -class UserUsage(Repository): - def __init__(self, supabase_client): - self.db = supabase_client - - def create_user_daily_usage( - self, user_id: UUID, user_email: str, date: datetime, number: int = 1 - ): - return ( - self.db.table("user_daily_usage") - .insert( - { - "user_id": str(user_id), - "email": user_email, - "date": date, - "daily_requests_count": number, - } - ) - .execute() - ) - - def get_user_settings(self, user_id): - """ - Fetch the user settings from the database - """ - - user_settings_response = ( - self.db.from_("user_settings") - .select("*") - .filter("user_id", "eq", str(user_id)) - .execute() - ).data - - if len(user_settings_response) == 0: - # Create the user settings - user_settings_response = ( - self.db.table("user_settings") - .insert({"user_id": str(user_id)}) - .execute() - ).data - - if len(user_settings_response) == 0: - raise ValueError("User settings could 
not be created") - - user_settings = user_settings_response[0] - - return user_settings - - def get_models(self): - model_settings_response = (self.db.from_("models").select("*").execute()).data - if len(model_settings_response) == 0: - raise ValueError("An issue occured while fetching the model settings") - return model_settings_response - - def get_user_monthly(self, user_id): - pass - - def get_user_usage(self, user_id): - """ - Fetch the user request stats from the database - """ - requests_stats = ( - self.db.from_("user_daily_usage") - .select("*") - .filter("user_id", "eq", user_id) - .execute() - ) - return requests_stats.data - - def get_user_requests_count_for_day(self, user_id, date): - """ - Fetch the user request count from the database - """ - response = ( - self.db.from_("user_daily_usage") - .select("daily_requests_count") - .filter("user_id", "eq", user_id) - .filter("date", "eq", date) - .execute() - ).data - - if response and len(response) > 0: - return response[0]["daily_requests_count"] - return 0 - - def get_user_requests_count_for_month(self, user_id, date): - """ - Fetch the user request count from the database - """ - date_30_days_ago = (datetime.now() - timedelta(days=30)).strftime("%Y%m%d") - - response = ( - self.db.from_("user_daily_usage") - .select("daily_requests_count") - .filter("user_id", "eq", user_id) - .filter("date", "gte", date_30_days_ago) - .execute() - ).data - - if response and len(response) > 0: - return sum(row["daily_requests_count"] for row in response) - return 0 - - def increment_user_request_count(self, user_id, date, number: int = 1): - """ - Increment the user's requests count for a specific day - """ - - self.update_user_request_count(user_id, daily_requests_count=number, date=date) - - def update_user_request_count(self, user_id, daily_requests_count, date): - response = ( - self.db.table("user_daily_usage") - .update({"daily_requests_count": daily_requests_count}) - .match({"user_id": user_id, "date": date}) - .execute() - ) - - return response diff --git a/backend/core/quivr_core/api/models/databases/supabase/files.py b/backend/core/quivr_core/api/models/file_repository.py similarity index 50% rename from backend/core/quivr_core/api/models/databases/supabase/files.py rename to backend/core/quivr_core/api/models/file_repository.py index 6298b1cc0926..3fc263ae0b17 100644 --- a/backend/core/quivr_core/api/models/databases/supabase/files.py +++ b/backend/core/quivr_core/api/models/file_repository.py @@ -1,27 +1,31 @@ -from quivr_core.api.models.databases.repository import Repository +from uuid import UUID +from quivr_core.api.models.settings import get_supabase_client -class File(Repository): - def __init__(self, supabase_client): - self.db = supabase_client - def set_file_vectors_ids(self, file_sha1): +class FileRepository: + def __init__(self): + self.db = get_supabase_client() + + def set_file_vectors_ids(self, file_sha1: bytes): response = ( self.db.table("vectors") .select("id") - .filter("file_sha1", "eq", file_sha1) + .filter("file_sha1", "eq", str(file_sha1)) .execute() ) return response.data - def get_brain_vectors_by_brain_id_and_file_sha1(self, brain_id, file_sha1): + def get_brain_vectors_by_brain_id_and_file_sha1( + self, brain_id: UUID, file_sha1: bytes + ): self.set_file_vectors_ids(file_sha1) # Check if file exists in that brain response = ( self.db.table("brains_vectors") .select("brain_id, vector_id") .filter("brain_id", "eq", str(brain_id)) - .filter("file_sha1", "eq", file_sha1) + .filter("file_sha1", "eq", 
str(file_sha1)) .execute() ) diff --git a/backend/core/quivr_core/api/models/files.py b/backend/core/quivr_core/api/models/files.py index afad981976e1..8f0e6938989f 100644 --- a/backend/core/quivr_core/api/models/files.py +++ b/backend/core/quivr_core/api/models/files.py @@ -6,8 +6,8 @@ from pydantic import BaseModel from quivr_core.api.logger import get_logger -from quivr_core.api.models.databases.supabase.supabase import SupabaseDB -from quivr_core.api.models.settings import get_supabase_db +from quivr_core.api.models.file_repository import FileRepository +from quivr_core.api.models.vectors import Vector from quivr_core.api.modules.brain.service.brain_vector_service import BrainVectorService from quivr_core.api.packages.files.file import compute_sha1_from_content @@ -28,12 +28,10 @@ class File(BaseModel): def __init__(self, **data): super().__init__(**data) + self.vectors_repository = Vector() + self.file_repository = FileRepository() data["file_sha1"] = compute_sha1_from_content(data["bytes_content"]) - @property - def supabase_db(self) -> SupabaseDB: - return get_supabase_db() - def compute_documents(self, loader_class): """ Compute the documents from the file @@ -56,7 +54,7 @@ def set_file_vectors_ids(self): Set the vectors_ids property with the ids of the vectors that are associated with the file in the vectors table """ - self.vectors_ids = self.supabase_db.get_vectors_by_file_sha1( + self.vectors_ids = self.vectors_repository.get_vectors_by_file_sha1( self.file_sha1 ).data @@ -79,7 +77,7 @@ def file_already_exists_in_brain(self, brain_id): Args: brain_id (str): Brain id """ - response = self.supabase_db.get_brain_vectors_by_brain_id_and_file_sha1( + response = self.file_repository.get_brain_vectors_by_brain_id_and_file_sha1( brain_id, self.file_sha1, # type: ignore ) diff --git a/backend/core/quivr_core/api/models/files_in_storage.py b/backend/core/quivr_core/api/models/files_in_storage.py deleted file mode 100644 index 05e4d908cfa9..000000000000 --- a/backend/core/quivr_core/api/models/files_in_storage.py +++ /dev/null @@ -1,16 +0,0 @@ -from uuid import UUID - -from pydantic import BaseModel - - -class FileInStorage(BaseModel): - Id: UUID - Key: str - - @property - def id(self) -> UUID: - return self.Id - - @property - def key(self) -> str: - return self.Key diff --git a/backend/core/quivr_core/api/models/settings.py b/backend/core/quivr_core/api/models/settings.py index d3bb3bf9248a..f35385c306b1 100644 --- a/backend/core/quivr_core/api/models/settings.py +++ b/backend/core/quivr_core/api/models/settings.py @@ -1,23 +1,20 @@ from typing import Optional from uuid import UUID +from dotenv import load_dotenv from langchain.embeddings.base import Embeddings from langchain_community.embeddings.ollama import OllamaEmbeddings from langchain_community.vectorstores.supabase import SupabaseVectorStore from langchain_openai import OpenAIEmbeddings from posthog import Posthog from pydantic_settings import BaseSettings, SettingsConfigDict -from quivr_core.api.logger import get_logger -from quivr_core.api.models.databases.supabase.supabase import SupabaseDB from sqlalchemy import Engine, create_engine from supabase.client import Client, create_client -logger = get_logger(__name__) - +from quivr_core.api.logger import get_logger -class BrainRateLimiting(BaseSettings): - model_config = SettingsConfigDict(validate_default=False) - max_brain_per_user: int = 5 +load_dotenv() +logger = get_logger(__name__) # The `PostHogSettings` class is used to initialize and interact with the PostHog analytics 
service. @@ -120,14 +117,8 @@ class BrainSettings(BaseSettings): langfuse_secret_key: str | None = None -class ResendSettings(BaseSettings): - model_config = SettingsConfigDict(validate_default=False) - resend_api_key: str = "null" - - # Global variables to store the Supabase client and database instances _supabase_client: Optional[Client] = None -_supabase_db: Optional[SupabaseDB] = None _db_engine: Optional[Engine] = None _embedding_service = None @@ -160,14 +151,6 @@ def get_supabase_client() -> Client: return _supabase_client -def get_supabase_db() -> SupabaseDB: - global _supabase_db - if _supabase_db is None: - logger.info("Creating Supabase DB") - _supabase_db = SupabaseDB(get_supabase_client()) - return _supabase_db - - def get_embedding_client() -> Embeddings: global _embedding_service if settings.ollama_api_base_url: diff --git a/backend/core/quivr_core/api/models/sqlalchemy_repository.py b/backend/core/quivr_core/api/models/sqlalchemy_repository.py deleted file mode 100644 index 7b295187973a..000000000000 --- a/backend/core/quivr_core/api/models/sqlalchemy_repository.py +++ /dev/null @@ -1,73 +0,0 @@ -from datetime import datetime -from uuid import uuid4 - -from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer, String -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import relationship - -Base = declarative_base() - - -class User(Base): - __tablename__ = "users" - - user_id = Column(String, primary_key=True) - email = Column(String) - date = Column(DateTime) - daily_requests_count = Column(Integer) - - -class Brain(Base): - __tablename__ = "brains" - - brain_id = Column(Integer, primary_key=True) - name = Column(String) - users = relationship("BrainUser", back_populates="brain") - vectors = relationship("BrainVector", back_populates="brain") - - -class BrainUser(Base): - __tablename__ = "brains_users" - - id = Column(Integer, primary_key=True) - user_id = Column(Integer, ForeignKey("users.user_id")) - brain_id = Column(Integer, ForeignKey("brains.brain_id")) - rights = Column(String) - - user = relationship("User") - brain = relationship("Brain", back_populates="users") - - -class BrainVector(Base): - __tablename__ = "brains_vectors" - - vector_id = Column(String, primary_key=True, default=lambda: str(uuid4())) - brain_id = Column(Integer, ForeignKey("brains.brain_id")) - file_sha1 = Column(String) - - brain = relationship("Brain", back_populates="vectors") - - -class BrainSubscriptionInvitation(Base): - __tablename__ = "brain_subscription_invitations" - - id = Column(Integer, primary_key=True) # Assuming an integer primary key named 'id' - brain_id = Column(String, ForeignKey("brains.brain_id")) - email = Column(String, ForeignKey("users.email")) - rights = Column(String) - - brain = relationship("Brain") - user = relationship("User", foreign_keys=[email]) - - -class ApiKey(Base): - __tablename__ = "api_keys" - - key_id = Column(String, primary_key=True, default=lambda: str(uuid4())) - user_id = Column(Integer, ForeignKey("users.user_id")) - api_key = Column(String, unique=True) - creation_time = Column(DateTime, default=datetime.utcnow) - is_active = Column(Boolean, default=True) - deleted_time = Column(DateTime, nullable=True) - - user = relationship("User") diff --git a/backend/core/quivr_core/api/models/databases/supabase/vectors.py b/backend/core/quivr_core/api/models/vectors.py similarity index 88% rename from backend/core/quivr_core/api/models/databases/supabase/vectors.py rename to 
backend/core/quivr_core/api/models/vectors.py index b93ee701b542..e2d207d4d88b 100644 --- a/backend/core/quivr_core/api/models/databases/supabase/vectors.py +++ b/backend/core/quivr_core/api/models/vectors.py @@ -1,9 +1,11 @@ -from quivr_core.api.models.databases.repository import Repository +from postgrest.types import CountMethod +from quivr_core.api.models.settings import get_supabase_client -class Vector(Repository): - def __init__(self, supabase_client): - self.db = supabase_client + +class Vector: + def __init__(self): + self.db = get_supabase_client() def get_vectors_by_file_name(self, file_name): response = ( @@ -54,7 +56,7 @@ def get_vectors_by_batch(self, batch_id): self.db.table("vectors") .select( "name:metadata->>file_name, size:metadata->>file_size", - count="exact", + count=CountMethod.exact, ) .eq("id", batch_id) .execute() @@ -67,7 +69,7 @@ def get_vectors_in_batch(self, batch_ids): self.db.table("vectors") .select( "name:metadata->>file_name, size:metadata->>file_size", - count="exact", + count=CountMethod.exact, ) .in_("id", batch_ids) .execute() From bcd13441d7444b0e5334a3e4e0f9ca7b4640dacc Mon Sep 17 00:00:00 2001 From: aminediro Date: Mon, 1 Jul 2024 15:27:06 +0200 Subject: [PATCH 16/20] added rag service --- .../quivr_core/api/modules/rag/__init__.py | 0 .../quivr_core/api/modules/rag/rag_service.py | 293 ++++++++++++++++++ .../core/quivr_core/api/modules/rag/utils.py | 92 ++++++ 3 files changed, 385 insertions(+) create mode 100644 backend/core/quivr_core/api/modules/rag/__init__.py create mode 100644 backend/core/quivr_core/api/modules/rag/rag_service.py create mode 100644 backend/core/quivr_core/api/modules/rag/utils.py diff --git a/backend/core/quivr_core/api/modules/rag/__init__.py b/backend/core/quivr_core/api/modules/rag/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/backend/core/quivr_core/api/modules/rag/rag_service.py b/backend/core/quivr_core/api/modules/rag/rag_service.py new file mode 100644 index 000000000000..f00a8e6ce5ec --- /dev/null +++ b/backend/core/quivr_core/api/modules/rag/rag_service.py @@ -0,0 +1,293 @@ +import datetime +from uuid import UUID, uuid4 + +from langchain_community.chat_models import ChatLiteLLM + +from quivr_core.api.logger import get_logger +from quivr_core.api.models.settings import ( + get_embedding_client, + get_supabase_client, + settings, +) +from quivr_core.api.modules.brain.entity.brain_entity import BrainEntity +from quivr_core.api.modules.brain.service.brain_service import BrainService +from quivr_core.api.modules.brain.service.utils.format_chat_history import ( + format_chat_history, +) +from quivr_core.api.modules.chat.dto.inputs import CreateChatHistory +from quivr_core.api.modules.chat.dto.outputs import GetChatHistoryOutput +from quivr_core.api.modules.chat.service.chat_service import ChatService +from quivr_core.api.modules.knowledge.repository.knowledges import KnowledgeRepository +from quivr_core.api.modules.prompt.entity.prompt import Prompt +from quivr_core.api.modules.prompt.service.prompt_service import PromptService +from quivr_core.api.modules.user.entity.user_identity import UserIdentity +from quivr_core.api.vectorstore.supabase import CustomSupabaseVectorStore +from quivr_core.config import RAGConfig +from quivr_core.models import ParsedRAGResponse, RAGResponseMetadata +from quivr_core.quivr_rag import QuivrQARAG + +from .utils import generate_source + +logger = get_logger(__name__) + + +class RAGService: + def __init__( + self, + current_user: UserIdentity, + brain_id: 
UUID | None,
+        chat_id: UUID,
+        brain_service: BrainService,
+        prompt_service: PromptService,
+        chat_service: ChatService,
+        knowledge_service: KnowledgeRepository,
+    ):
+        # Services
+        self.brain_service = brain_service
+        self.prompt_service = prompt_service
+        self.chat_service = chat_service
+        self.knowledge_service = knowledge_service
+
+        # Base models
+        self.current_user = current_user
+        self.chat_id = chat_id
+        self.brain = self.get_or_create_brain(brain_id, self.current_user.id)
+        self.prompt = self.get_brain_prompt(self.brain)
+
+        # check at init time
+        # TODO: how to get model?
+        self.model_to_use = ""
+        # self.model_to_use = self.check_and_update_user_usage(
+        #     self.current_user, self.brain
+        # )
+
+    def get_brain_prompt(self, brain: BrainEntity) -> Prompt | None:
+        return (
+            self.prompt_service.get_prompt_by_id(brain.prompt_id)
+            if brain.prompt_id
+            else None
+        )
+
+    def get_llm(self, rag_config: RAGConfig):
+        api_base = (
+            settings.ollama_api_base_url
+            if settings.ollama_api_base_url and rag_config.model.startswith("ollama")
+            else None
+        )
+        return ChatLiteLLM(
+            temperature=rag_config.temperature,
+            max_tokens=rag_config.max_tokens,
+            model=rag_config.model,
+            streaming=rag_config.streaming,
+            verbose=False,
+            api_base=api_base,
+        )  # pyright: ignore reportPrivateUsage=none
+
+    def get_or_create_brain(self, brain_id: UUID | None, user_id: UUID) -> BrainEntity:
+        brain = None
+        if brain_id is not None:
+            brain = self.brain_service.get_brain_details(brain_id, user_id)
+
+        # TODO: Create if doesn't exist
+        assert brain
+
+        if brain.integration:
+            assert brain.integration.user_id == user_id
+        return brain
+
+    # def check_and_update_user_usage(self, user: UserIdentity, brain: BrainEntity):
+    #     """Check user limits and raise if the user has reached them:
+    #     1. Raise if one of these conditions holds:
+    #         - User doesn't have access to brains
+    #         - Model of brain is not in user_settings.models
+    #         - Latest sum_30d(user_daily_user) < user_settings.max_monthly_usage
+    #         - Check sum(user_settings.daily_user_count) + model_price < user_settings.monthly_chat_credits
+    #     2. Updates user usage
+    #     """
+    #     # TODO(@aminediro) : THIS is bug prone, should retrieve it from DB here
+    #     user_usage = UserUsage(id=user.id, email=user.email)
+    #     user_settings = user_usage.get_user_settings()
+    #     all_models = user_usage.get_models()
+
+    #     # TODO(@aminediro): refactor this function
+    #     model_to_use = find_model_and_generate_metadata(
+    #         brain.model,
+    #         user_settings,
+    #         all_models,
+    #     )
+    #     cost = compute_cost(model_to_use, all_models)
+    #     # Raises HTTP if user usage exceeds limits
+    #     update_user_usage(user_usage, user_settings, cost)  # noqa: F821
+    #     return model_to_use
+
+    def create_vector_store(
+        self, brain_id: UUID, max_input: int
+    ) -> CustomSupabaseVectorStore:
+        supabase_client = get_supabase_client()
+        embeddings = get_embedding_client()
+        return CustomSupabaseVectorStore(
+            supabase_client,
+            embeddings,
+            table_name="vectors",
+            brain_id=brain_id,
+            max_input=max_input,
+        )
+
+    def save_answer(self, question: str, answer: ParsedRAGResponse):
+        return self.chat_service.update_chat_history(
+            CreateChatHistory(
+                **{
+                    "chat_id": self.chat_id,
+                    "user_message": question,
+                    "assistant": answer.answer,
+                    "brain_id": self.brain.brain_id,
+                    # TODO: prompt_id should always be not None
+                    "prompt_id": self.prompt.id if self.prompt else None,
+                    "metadata": answer.metadata.model_dump() if answer.metadata else {},
+                }
+            )
+        )
+
+    async def generate_answer(
+        self,
+        question: str,
+    ):
+        logger.info(
+            f"Creating question for chat {self.chat_id} with brain {self.brain.brain_id} "
+        )
+        rag_config = RAGConfig(
+            model=self.model_to_use.name,
+            temperature=self.brain.temperature,
+            max_input=self.model_to_use.max_input,
+            max_tokens=self.brain.max_tokens,
+            prompt=self.prompt.content if self.prompt else None,
+            streaming=False,
+        )
+        history = await self.chat_service.get_chat_history(self.chat_id)
+        # Get list of files
+        list_files = self.knowledge_service.get_all_knowledge_in_brain(
+            self.brain.brain_id
+        )
+        # Build RAG dependencies to inject
+        vector_store = self.create_vector_store(
+            self.brain.brain_id, rag_config.max_input
+        )
+        llm = self.get_llm(rag_config)
+        # Initialize the RAG pipeline
+        rag_pipeline = QuivrQARAG(
+            rag_config=rag_config, llm=llm, vector_store=vector_store
+        )
+        # Format the history, sanitize the input
+        transformed_history = format_chat_history(history)
+
+        parsed_response = rag_pipeline.answer(question, transformed_history, list_files)
+
+        # Save the answer to db
+        new_chat_entry = self.save_answer(question, parsed_response)
+
+        # Format output to be correct
+        return GetChatHistoryOutput(
+            **{
+                "chat_id": self.chat_id,
+                "user_message": question,
+                "assistant": parsed_response.answer,
+                "message_time": new_chat_entry.message_time,
+                "prompt_title": (self.prompt.title if self.prompt else None),
+                "brain_name": self.brain.name if self.brain else None,
+                "message_id": new_chat_entry.message_id,
+                "brain_id": str(self.brain.brain_id) if self.brain else None,
+                "metadata": (
+                    parsed_response.metadata.model_dump()
+                    if parsed_response.metadata
+                    else {}
+                ),
+            }
+        )
+
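+    # The streaming variant below mirrors generate_answer, but yields
+    # incremental server-sent-event payloads: QuivrQARAG.answer_astream
+    # produces chunks exposing .answer, .metadata and .last_chunk, which the
+    # loop further down serializes as "data: ..." lines as they arrive.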
+    async def generate_answer_stream(
+        self,
+        question: str,
+    ):
+        logger.info(
+            f"Creating question for chat {self.chat_id} with brain {self.brain.brain_id} "
+        )
+        # Build the rag config
+        rag_config = RAGConfig(
+            model=self.model_to_use.name,
+            temperature=self.brain.temperature,
+            max_input=self.model_to_use.max_input,
+            max_tokens=self.brain.max_tokens,
+            prompt=self.prompt.content if self.prompt else "",
+            streaming=True,
+        )
+        # Getting chat history
+        history = await self.chat_service.get_chat_history(self.chat_id)
+        # Format the history, sanitize the input
+        transformed_history = format_chat_history(history)
+
+        # Get list of file urls
+        # TODO: Why do we get ALL the files?
+        list_files = self.knowledge_service.get_all_knowledge_in_brain(
+            self.brain.brain_id
+        )
+        llm = self.get_llm(rag_config)
+        vector_store = self.create_vector_store(
+            self.brain.brain_id, rag_config.max_input
+        )
+        # Initialize the RAG pipeline
+        rag_pipeline = QuivrQARAG(
+            rag_config=rag_config, llm=llm, vector_store=vector_store
+        )
+
+        full_answer = ""
+
+        message_metadata = {
+            "chat_id": self.chat_id,
+            "message_id": uuid4(),  # do we need it?
+            "user_message": question,  # TODO: define result
+            "message_time": datetime.datetime.now(),  # TODO: define result
+            "prompt_title": (self.prompt.title if self.prompt else ""),
+            "brain_name": self.brain.name if self.brain else None,
+            "brain_id": self.brain.brain_id if self.brain else None,
+        }
+
+        async for response in rag_pipeline.answer_astream(
+            question, transformed_history, list_files
+        ):
+            # Format output to be correct
+            if not response.last_chunk:
+                streamed_chat_history = GetChatHistoryOutput(
+                    assistant=response.answer,
+                    metadata=response.metadata.model_dump(),
+                    **message_metadata,
+                )
+                full_answer += response.answer
+                yield f"data: {streamed_chat_history.model_dump_json()}"
+
+        # For the last chunk, parse the sources and the full answer
+        streamed_chat_history = GetChatHistoryOutput(
+            assistant=response.answer,
+            metadata=response.metadata.model_dump(),
+            **message_metadata,
+        )
+
+        sources_urls = generate_source(
+            response.metadata.sources,
+            self.brain.brain_id,
+            (
+                streamed_chat_history.metadata["citations"]
+                if streamed_chat_history.metadata
+                else None
+            ),
+        )
+        if streamed_chat_history.metadata:
+            streamed_chat_history.metadata["sources"] = sources_urls
+
+        self.save_answer(
+            question,
+            ParsedRAGResponse(
+                answer=full_answer,
+                metadata=RAGResponseMetadata(**streamed_chat_history.metadata),
+            ),
+        )
+        yield f"data: {streamed_chat_history.model_dump_json()}"
diff --git a/backend/core/quivr_core/api/modules/rag/utils.py b/backend/core/quivr_core/api/modules/rag/utils.py
new file mode 100644
index 000000000000..414b2bcdd811
--- /dev/null
+++ b/backend/core/quivr_core/api/modules/rag/utils.py
@@ -0,0 +1,92 @@
+from typing import Any, List
+from uuid import UUID
+
+from quivr_api.modules.chat.dto.chats import Sources
+from quivr_api.modules.upload.service.generate_file_signed_url import (
+    generate_file_signed_url,
+)
+
+from quivr_core.api.logger import get_logger
+
+logger = get_logger(__name__)
+
+
+def generate_source(
+    source_documents: List[Any] | None,
+    brain_id: UUID,
+    citations: List[int] | None = None,
+) -> List[Sources]:
+    """
+    Generate the sources list for the answer.
+    It takes in a list of source documents and citation indices that point to the documents used in the answer.
+    """
+    # Initialize an empty list for sources
+    sources_list: List[Sources] = []
+
+    # Initialize a dictionary for storing generated URLs
+    generated_urls = {}
+
+    # Cache used to deduplicate sources that share the same name
+    sources_url_cache = {}
+
+    # Get source documents from the result, default to an empty list if not found
+    # If source documents exist
+    if source_documents:
+        logger.info(f"Citations {citations}")
+        for index, doc in enumerate(source_documents):
+            logger.info(f"Processing source document {doc.metadata['file_name']}")
+            if citations is not None:
+                if index not in citations:
+                    logger.info(f"Skipping source document {doc.metadata['file_name']}")
+                    continue
+            # Check if 'url' is in the document metadata
+            is_url = (
+                "original_file_name" in doc.metadata
+                and doc.metadata["original_file_name"] is not None
+                and doc.metadata["original_file_name"].startswith("http")
+            )
+
+            # Determine the name based on whether it's a URL or a file
+            name = (
+                doc.metadata["original_file_name"]
+                if is_url
+                else doc.metadata["file_name"]
+            )
+
+            # Determine the type based on whether it's a URL or a file
+            type_ = "url" if is_url else "file"
+
+            # Determine the source URL based on whether it's a URL or a file
+            if is_url:
+                source_url = doc.metadata["original_file_name"]
+            else:
+                file_path = f"{brain_id}/{doc.metadata['file_name']}"
+                # Check if the URL has already been generated
+                if file_path in generated_urls:
+                    source_url = generated_urls[file_path]
+                else:
+                    # Generate the URL
+                    if file_path in sources_url_cache:
+                        source_url = sources_url_cache[file_path]
+                    else:
+                        generated_url = generate_file_signed_url(file_path)
+                        if generated_url is not None:
+                            source_url = generated_url.get("signedURL", "")
+                        else:
+                            source_url = ""
+                    # Store the generated URL
+                    generated_urls[file_path] = source_url
+
+            # Append a new Sources object to the list
+            sources_list.append(
+                Sources(
+                    name=name,
+                    type=type_,
+                    source_url=source_url,
+                    original_file_name=name,
+                    citation=doc.page_content,
+                )
+            )
+    else:
+        logger.info("No source documents found or source_documents is not a list.")
+    return sources_list
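Patch 16 gathers retrieval, prompting and persistence behind the single RAGService object. A minimal caller sketch follows, assuming FastAPI-style dependency wiring: the route path, the get_service helper usage and the bare BrainService()/KnowledgeRepository() construction are illustrative assumptions, not part of this series.

# Hypothetical caller sketch -- not part of the patch series. Import paths
# mirror the tree above; only get_current_user and get_service are known from
# quivr_core.api.modules.dependencies, the rest of the wiring is assumed.
from uuid import UUID

from fastapi import APIRouter, Depends

from quivr_core.api.modules.brain.service.brain_service import BrainService
from quivr_core.api.modules.chat.service.chat_service import ChatService
from quivr_core.api.modules.dependencies import get_current_user, get_service
from quivr_core.api.modules.knowledge.repository.knowledges import KnowledgeRepository
from quivr_core.api.modules.prompt.service.prompt_service import PromptService
from quivr_core.api.modules.rag.rag_service import RAGService
from quivr_core.api.modules.user.entity.user_identity import UserIdentity

router = APIRouter()


@router.post("/chat/{chat_id}/question")
async def ask_question(
    chat_id: UUID,
    question: str,
    brain_id: UUID | None = None,
    current_user: UserIdentity = Depends(get_current_user),
    chat_service: ChatService = Depends(get_service(ChatService)),
    prompt_service: PromptService = Depends(get_service(PromptService)),
):
    # RAGService resolves the brain and prompt at construction time, then per
    # question builds the vector store, the LLM and the QuivrQARAG pipeline,
    # and persists the answer before returning a GetChatHistoryOutput.
    rag_service = RAGService(
        current_user=current_user,
        brain_id=brain_id,
        chat_id=chat_id,
        brain_service=BrainService(),
        prompt_service=prompt_service,
        chat_service=chat_service,
        # KnowledgeRepository construction is assumed; it may need a session.
        knowledge_service=KnowledgeRepository(),
    )
    return await rag_service.generate_answer(question)

For streaming clients, generate_answer_stream on the same service yields chunks already framed as "data: ..." lines for server-sent events. Note also that patch 18 later changes prompt_service.py to "repository_cls = PromptRepository", which appears to be what lets a get_service-style helper resolve PromptService's repository.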
From dca468d106278c3c7a1db3d604f2abeeced2d70f Mon Sep 17 00:00:00 2001
From: aminediro
Date: Mon, 1 Jul 2024 15:27:34 +0200
Subject: [PATCH 17/20] notification service

---
 .../quivr_core/api/modules/notification/dto/inputs.py    | 4 +++-
 .../quivr_core/api/modules/notification/dto/outputs.py   | 0
 .../api/modules/notification/repository/notifications.py | 8 ++++----
 .../notification/repository/notifications_interface.py   | 4 ++--
 .../modules/notification/service/notification_service.py | 8 ++++----
 5 files changed, 13 insertions(+), 11 deletions(-)
 delete mode 100644 backend/core/quivr_core/api/modules/notification/dto/outputs.py

diff --git a/backend/core/quivr_core/api/modules/notification/dto/inputs.py b/backend/core/quivr_core/api/modules/notification/dto/inputs.py
index 7aed0fa3c161..8c4d22ca25f2 100644
--- a/backend/core/quivr_core/api/modules/notification/dto/inputs.py
+++ b/backend/core/quivr_core/api/modules/notification/dto/inputs.py
@@ -2,7 +2,9 @@
 from uuid import UUID
 
 from pydantic import BaseModel
 
-from quivr_api.modules.notification.entity.notification import NotificationsStatusEnum
+from quivr_core.api.modules.notification.entity.notification import (
+    NotificationsStatusEnum,
+)
 
 
 class CreateNotification(BaseModel):
diff --git a/backend/core/quivr_core/api/modules/notification/dto/outputs.py b/backend/core/quivr_core/api/modules/notification/dto/outputs.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/backend/core/quivr_core/api/modules/notification/repository/notifications.py b/backend/core/quivr_core/api/modules/notification/repository/notifications.py
index fc0af6073e52..75b36e8db4c6 100644
--- a/backend/core/quivr_core/api/modules/notification/repository/notifications.py
+++ b/backend/core/quivr_core/api/modules/notification/repository/notifications.py
@@ -1,7 +1,7 @@
-from quivr_api.logger import get_logger
-from quivr_api.modules.notification.dto.inputs import CreateNotification
-from quivr_api.modules.notification.entity.notification import Notification
-from quivr_api.modules.notification.repository.notifications_interface import (
+from quivr_core.api.logger import get_logger
+from quivr_core.api.modules.notification.dto.inputs import CreateNotification
+from quivr_core.api.modules.notification.entity.notification import Notification
+from quivr_core.api.modules.notification.repository.notifications_interface import (
     NotificationInterface,
 )
 
diff --git a/backend/core/quivr_core/api/modules/notification/repository/notifications_interface.py b/backend/core/quivr_core/api/modules/notification/repository/notifications_interface.py
index e80740a825b3..423e46df1917 100644
--- a/backend/core/quivr_core/api/modules/notification/repository/notifications_interface.py
+++ b/backend/core/quivr_core/api/modules/notification/repository/notifications_interface.py
@@ -1,11 +1,11 @@
 from abc import ABC, abstractmethod
 from uuid import UUID
 
-from quivr_api.modules.notification.dto.inputs import (
+from quivr_core.api.modules.notification.dto.inputs import (
     CreateNotification,
     NotificationUpdatableProperties,
 )
-from quivr_api.modules.notification.entity.notification import Notification
+from quivr_core.api.modules.notification.entity.notification import Notification
 
 
 class NotificationInterface(ABC):
diff --git a/backend/core/quivr_core/api/modules/notification/service/notification_service.py b/backend/core/quivr_core/api/modules/notification/service/notification_service.py
index 295bcd9c91da..7fd86bd5ef8b 100644
--- a/backend/core/quivr_core/api/modules/notification/service/notification_service.py
+++ b/backend/core/quivr_core/api/modules/notification/service/notification_service.py
@@ -1,10 +1,10 @@
-from quivr_api.models.settings import get_supabase_client
-from quivr_api.modules.notification.dto.inputs import (
+from quivr_core.api.models.settings import get_supabase_client
+from quivr_core.api.modules.notification.dto.inputs import (
     CreateNotification,
     NotificationUpdatableProperties,
 )
-from quivr_api.modules.notification.repository.notifications import Notifications
-from quivr_api.modules.notification.repository.notifications_interface import (
+from quivr_core.api.modules.notification.repository.notifications import Notifications
+from quivr_core.api.modules.notification.repository.notifications_interface import (
     NotificationInterface,
 )

From 3a818631bec759313aab1bc31cec7903d4d4f1bf Mon Sep 17 00:00:00 2001
From: aminediro
Date: Mon, 1 Jul 2024 16:05:13 +0200
Subject: [PATCH 18/20] working quivr_core api no deps

---
 backend/core/quivr_core/api/celery_config.py  |   2 -
 backend/core/quivr_core/api/celery_worker.py  | 182 ++----
 backend/core/quivr_core/api/dependencies.py   |  51 -----
 backend/core/quivr_core/api/main.py           |  26 ++-
 .../quivr_core/api/modules/dependencies.py    |   1 -
 .../modules/prompt/service/prompt_service.py  |   2 +-
 .../core/quivr_core/api/modules/rag/utils.py  |   7 +-
 .../user/controller/user_controller.py        |   3 +-
 .../api/packages/embeddings/vectors.py        |  11 +-
 .../api/packages/files/parsers/audio.py       |   2 +-
 .../api/packages/files/parsers/common.py      |   2 +-
 .../api/packages/files/parsers/github.py      |   4 +-
 .../api/packages/files/parsers/telegram.py    |   2 +-
 .../api/packages/quivr_core/utils.py          |   4 +-
 .../quivr_core/api/vectorstore/supabase.py    |   3 +-
 .../core/quivr_core/storage/local_storage.py  |  16 +-
 16 files changed, 66 insertions(+), 252 deletions(-)
 delete mode 100644 backend/core/quivr_core/api/dependencies.py

diff --git 
a/backend/core/quivr_core/api/celery_config.py b/backend/core/quivr_core/api/celery_config.py index 9ee82370361c..e4752f8dec35 100644 --- a/backend/core/quivr_core/api/celery_config.py +++ b/backend/core/quivr_core/api/celery_config.py @@ -39,5 +39,3 @@ ) else: raise ValueError(f"Unsupported broker URL: {CELERY_BROKER_URL}") - -celery.autodiscover_tasks(["quivr_api.modules.sync.tasks"]) diff --git a/backend/core/quivr_core/api/celery_worker.py b/backend/core/quivr_core/api/celery_worker.py index 318fbc545ed8..b4f75fdac9d3 100644 --- a/backend/core/quivr_core/api/celery_worker.py +++ b/backend/core/quivr_core/api/celery_worker.py @@ -1,38 +1,33 @@ import os -from datetime import datetime, timedelta from tempfile import NamedTemporaryFile from uuid import UUID from celery.schedules import crontab -from pytz import timezone -from quivr_core.celery_config import celery -from quivr_core.logger import get_logger -from quivr_core.middlewares.auth.auth_bearer import AuthBearer -from quivr_core.models.files import File -from quivr_core.models.settings import get_supabase_client, get_supabase_db -from quivr_core.modules.brain.integrations.Notion.Notion_connector import ( - NotionConnector, +from quivr_core.api.celery_config import celery +from quivr_core.api.logger import get_logger +from quivr_core.api.models.files import File +from quivr_core.api.models.settings import get_supabase_client +from quivr_core.api.modules.brain.service.brain_service import BrainService +from quivr_core.api.modules.brain.service.brain_vector_service import BrainVectorService +from quivr_core.api.modules.notification.dto.inputs import ( + NotificationUpdatableProperties, ) -from quivr_core.modules.brain.service.brain_service import BrainService -from quivr_core.modules.brain.service.brain_vector_service import BrainVectorService -from quivr_core.modules.notification.dto.inputs import NotificationUpdatableProperties -from quivr_core.modules.notification.entity.notification import NotificationsStatusEnum -from quivr_core.modules.notification.service.notification_service import ( +from quivr_core.api.modules.notification.entity.notification import ( + NotificationsStatusEnum, +) +from quivr_core.api.modules.notification.service.notification_service import ( NotificationService, ) -from quivr_core.modules.onboarding.service.onboarding_service import OnboardingService -from quivr_core.packages.files.crawl.crawler import CrawlWebsite, slugify -from quivr_core.packages.files.parsers.github import process_github -from quivr_core.packages.files.processors import filter_file -from quivr_core.packages.utils.telemetry import maybe_send_telemetry +from quivr_core.api.packages.files.crawl.crawler import CrawlWebsite, slugify +from quivr_core.api.packages.files.parsers.github import process_github +from quivr_core.api.packages.files.processors import filter_file +from quivr_core.api.packages.utils.telemetry import maybe_send_telemetry logger = get_logger(__name__) -onboardingService = OnboardingService() notification_service = NotificationService() brain_service = BrainService() -auth_bearer = AuthBearer() @celery.task(name="process_file_and_notify") @@ -158,157 +153,12 @@ def process_crawl_and_notify( return True -@celery.task -def remove_onboarding_more_than_x_days_task(): - onboardingService.remove_onboarding_more_than_x_days(7) - - -@celery.task(name="NotionConnectorLoad") -def process_integration_brain_created_initial_load(brain_id, user_id): - notion_connector = NotionConnector(brain_id=brain_id, user_id=user_id) - - pages = 
notion_connector.load() - - print("pages: ", len(pages)) - - -@celery.task -def process_integration_brain_sync_user_brain(brain_id, user_id): - notion_connector = NotionConnector(brain_id=brain_id, user_id=user_id) - - notion_connector.poll() - - @celery.task def ping_telemetry(): maybe_send_telemetry("ping", {"ping": "pong"}) -@celery.task(name="check_if_is_premium_user") -def check_if_is_premium_user(): - supabase = get_supabase_db() - supabase_db = supabase.db - - paris_tz = timezone("Europe/Paris") - current_time = datetime.now(paris_tz) - current_time_str = current_time.strftime("%Y-%m-%d %H:%M:%S.%f") - logger.debug(f"Current time: {current_time_str}") - - # Define the memoization period (e.g., 1 hour) - memoization_period = timedelta(hours=1) - memoization_cutoff = current_time - memoization_period - - # Fetch all necessary data in bulk - subscriptions = ( - supabase_db.table("subscriptions") - .select("*") - .filter("current_period_end", "gt", current_time_str) - .execute() - ).data - - customers = (supabase_db.table("customers").select("*").execute()).data - - customer_emails = [customer["email"] for customer in customers] - - # Split customer emails into batches of 50 - email_batches = [ - customer_emails[i : i + 20] for i in range(0, len(customer_emails), 20) - ] - - users = [] - for email_batch in email_batches: - batch_users = ( - supabase_db.table("users") - .select("id, email") - .in_("email", email_batch) - .execute() - ).data - users.extend(batch_users) - - product_features = ( - supabase_db.table("product_to_features").select("*").execute() - ).data - - user_settings = (supabase_db.table("user_settings").select("*").execute()).data - - # Create lookup dictionaries for faster access - user_dict = {user["email"]: user["id"] for user in users} - customer_dict = {customer["id"]: customer for customer in customers} - product_dict = { - product["stripe_product_id"]: product for product in product_features - } - settings_dict = {setting["user_id"]: setting for setting in user_settings} - - # Process subscriptions and update user settings - premium_user_ids = set() - settings_to_upsert = {} - for sub in subscriptions: - if sub["attrs"]["status"] != "active": - continue - - customer = customer_dict.get(sub["customer"]) - if not customer: - continue - - user_id = user_dict.get(customer["email"]) - if not user_id: - continue - - current_settings = settings_dict.get(user_id, {}) - last_check = current_settings.get("last_stripe_check") - - # Skip if the user was checked recently - if last_check and datetime.fromisoformat(last_check) > memoization_cutoff: - premium_user_ids.add(user_id) - continue - - user_id = str(user_id) # Ensure user_id is a string - premium_user_ids.add(user_id) - - product_id = sub["attrs"]["items"]["data"][0]["plan"]["product"] - product = product_dict.get(product_id) - if not product: - logger.warning(f"No matching product found for subscription: {sub['id']}") - continue - - settings_to_upsert[user_id] = { - "user_id": user_id, - "max_brains": product["max_brains"], - "max_brain_size": product["max_brain_size"], - "monthly_chat_credit": product["monthly_chat_credit"], - "api_access": product["api_access"], - "models": product["models"], - "is_premium": True, - "last_stripe_check": current_time_str, - } - - # Bulk upsert premium user settings in batches of 10 - settings_list = list(settings_to_upsert.values()) - for i in range(0, len(settings_list), 10): - batch = settings_list[i : i + 10] - supabase_db.table("user_settings").upsert(batch).execute() - - # Delete 
settings for non-premium users in batches of 10 - settings_to_delete = [ - setting["user_id"] - for setting in user_settings - if setting["user_id"] not in premium_user_ids and setting.get("is_premium") - ] - for i in range(0, len(settings_to_delete), 10): - batch = settings_to_delete[i : i + 10] - supabase_db.table("user_settings").delete().in_("user_id", batch).execute() - - logger.info( - f"Updated {len(settings_to_upsert)} premium users, deleted settings for {len(settings_to_delete)} non-premium users" - ) - return True - - celery.conf.beat_schedule = { - "remove_onboarding_more_than_x_days_task": { - "task": f"{__name__}.remove_onboarding_more_than_x_days_task", - "schedule": crontab(minute="0", hour="0"), - }, "ping_telemetry": { "task": f"{__name__}.ping_telemetry", "schedule": crontab(minute="*/30", hour="*"), diff --git a/backend/core/quivr_core/api/dependencies.py b/backend/core/quivr_core/api/dependencies.py deleted file mode 100644 index d66a773365fc..000000000000 --- a/backend/core/quivr_core/api/dependencies.py +++ /dev/null @@ -1,51 +0,0 @@ -import os -from typing import AsyncGenerator, Callable, Type, TypeVar - -from fastapi import Depends -from sqlalchemy.ext.asyncio import create_async_engine -from sqlmodel.ext.asyncio.session import AsyncSession - -from quivr_core.api.repositories.base_repository import BaseRepository -from quivr_core.api.services.base_service import BaseService -from quivr_core.models.settings import settings -from quivr_core.storage.local_storage import LocalStorage -from quivr_core.storage.storage_base import StorageBase - -R = TypeVar("R", bound=BaseRepository) -S = TypeVar("S", bound=BaseService) - -async_engine = create_async_engine( - settings.pg_database_async_url, - echo=True if os.getenv("ORM_DEBUG") else False, - future=True, -) - -# TODO: get env variable and set it -storage = LocalStorage() - - -def get_storage() -> StorageBase: - return storage - - -async def get_async_session() -> AsyncGenerator[AsyncSession, None]: - async with AsyncSession(async_engine) as session: - yield session - - -def get_repository(repository_model: Type[R]) -> Callable[..., R]: - def _get_repository(session: AsyncSession = Depends(get_async_session)) -> R: - return repository_model(session) - - return _get_repository - - -def get_service(service: Type[S]) -> Callable[..., S]: - def _get_service( - repository: BaseRepository = Depends( - get_repository(service.get_repository_cls()) - ), - ) -> S: - return service(repository) - - return _get_service diff --git a/backend/core/quivr_core/api/main.py b/backend/core/quivr_core/api/main.py index a75da91d0d1f..d19efc3b2835 100644 --- a/backend/core/quivr_core/api/main.py +++ b/backend/core/quivr_core/api/main.py @@ -1,6 +1,6 @@ -from dotenv import load_dotenv -from fastapi import FastAPI +import logging +from fastapi import FastAPI from quivr_core.api.modules.brain.controller import brain_router from quivr_core.api.modules.chat.controller import chat_router from quivr_core.api.modules.knowledge.controller import knowledge_router @@ -8,15 +8,33 @@ from quivr_core.api.modules.upload.controller import upload_router from quivr_core.api.modules.user.controller import user_router -load_dotenv() +logging.getLogger("httpx").setLevel(logging.WARNING) +logging.getLogger("LiteLLM").setLevel(logging.WARNING) +logging.getLogger("litellm").setLevel(logging.WARNING) app = FastAPI() app.include_router(brain_router) app.include_router(chat_router) - app.include_router(upload_router) app.include_router(user_router) 
app.include_router(prompt_router) app.include_router(knowledge_router) + + +@app.get("/") +async def root(): + return {"status": "OK"} + + +@app.get("/healthz", tags=["Health"]) +async def healthz(): + return {"status": "ok"} + + +if __name__ == "__main__": + # run main.py to debug backend + import uvicorn + + uvicorn.run(app, host="0.0.0.0", port=5050, log_level="debug", access_log=False) diff --git a/backend/core/quivr_core/api/modules/dependencies.py b/backend/core/quivr_core/api/modules/dependencies.py index c59e4533e4fc..fc34b560bfb7 100644 --- a/backend/core/quivr_core/api/modules/dependencies.py +++ b/backend/core/quivr_core/api/modules/dependencies.py @@ -32,7 +32,6 @@ def get_repository_cls(cls) -> Type[R]: S = TypeVar("S", bound=BaseService) -# TODO: env variable debug sql_alchemy async_engine = create_async_engine( settings.pg_database_async_url, echo=True if os.getenv("ORM_DEBUG") else False, diff --git a/backend/core/quivr_core/api/modules/prompt/service/prompt_service.py b/backend/core/quivr_core/api/modules/prompt/service/prompt_service.py index ba6f733de461..c6c033823fc1 100644 --- a/backend/core/quivr_core/api/modules/prompt/service/prompt_service.py +++ b/backend/core/quivr_core/api/modules/prompt/service/prompt_service.py @@ -15,7 +15,7 @@ class PromptService(BaseService[PromptRepository]): - repository: PromptRepository + repository_cls = PromptRepository def __init__(self, repository: PromptRepository): self.repository = repository diff --git a/backend/core/quivr_core/api/modules/rag/utils.py b/backend/core/quivr_core/api/modules/rag/utils.py index 414b2bcdd811..908fa2b69545 100644 --- a/backend/core/quivr_core/api/modules/rag/utils.py +++ b/backend/core/quivr_core/api/modules/rag/utils.py @@ -1,13 +1,12 @@ from typing import Any, List from uuid import UUID -from quivr_api.modules.chat.dto.chats import Sources -from quivr_api.modules.upload.service.generate_file_signed_url import ( +from quivr_core.api.logger import get_logger +from quivr_core.api.modules.chat.dto.chats import Sources +from quivr_core.api.modules.upload.service.generate_file_signed_url import ( generate_file_signed_url, ) -from quivr_core.api.logger import get_logger - logger = get_logger(__name__) diff --git a/backend/core/quivr_core/api/modules/user/controller/user_controller.py b/backend/core/quivr_core/api/modules/user/controller/user_controller.py index 850aafa4ee99..413f4c5b08b5 100644 --- a/backend/core/quivr_core/api/modules/user/controller/user_controller.py +++ b/backend/core/quivr_core/api/modules/user/controller/user_controller.py @@ -2,9 +2,8 @@ from fastapi import APIRouter, Depends -from quivr_core.api.dependencies import get_service from quivr_core.api.modules.brain.service.brain_user_service import BrainUserService -from quivr_core.api.modules.dependencies import get_current_user +from quivr_core.api.modules.dependencies import get_current_user, get_service from quivr_core.api.modules.user.dto.inputs import UserUpdatableProperties from quivr_core.api.modules.user.entity.user_identity import UserIdentity from quivr_core.api.modules.user.service.user_service import UserService diff --git a/backend/core/quivr_core/api/packages/embeddings/vectors.py b/backend/core/quivr_core/api/packages/embeddings/vectors.py index 7a6ee2a8e681..8c1ab42dbe69 100644 --- a/backend/core/quivr_core/api/packages/embeddings/vectors.py +++ b/backend/core/quivr_core/api/packages/embeddings/vectors.py @@ -8,12 +8,15 @@ from quivr_core.api.models.settings import ( get_documents_vector_store, get_embedding_client, - 
get_supabase_db, ) +from quivr_core.api.models.vectors import Vector logger = get_logger(__name__) +vector_repository = Vector() + + # TODO: Create interface for embeddings and implement it for Supabase and OpenAI (current Quivr) class Neurons(BaseModel): def create_vector(self, docs): @@ -37,13 +40,11 @@ def error_callback(exception): def process_batch(batch_ids: List[str]): - supabase_db = get_supabase_db() - try: if len(batch_ids) == 1: - return (supabase_db.get_vectors_by_batch(UUID(batch_ids[0]))).data + return (vector_repository.get_vectors_by_batch(UUID(batch_ids[0]))).data else: - return (supabase_db.get_vectors_in_batch(batch_ids)).data + return (vector_repository.get_vectors_in_batch(batch_ids)).data except Exception as e: logger.error("Error retrieving batched vectors", e) diff --git a/backend/core/quivr_core/api/packages/files/parsers/audio.py b/backend/core/quivr_core/api/packages/files/parsers/audio.py index c090bba045a0..7b7e08039040 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/audio.py +++ b/backend/core/quivr_core/api/packages/files/parsers/audio.py @@ -6,7 +6,7 @@ from quivr_core.api.models.files import File from quivr_core.api.models.settings import get_documents_vector_store -from quivr_core.packages.files.file import compute_sha1_from_content +from quivr_core.api.packages.files.file import compute_sha1_from_content def process_audio(file: File, **kwargs): diff --git a/backend/core/quivr_core/api/packages/files/parsers/common.py b/backend/core/quivr_core/api/packages/files/parsers/common.py index 5ffc49ad1e7c..642b8ecc9d48 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/common.py +++ b/backend/core/quivr_core/api/packages/files/parsers/common.py @@ -14,7 +14,7 @@ from quivr_core.api.models.files import File from quivr_core.api.modules.brain.service.brain_vector_service import BrainVectorService from quivr_core.api.modules.upload.service.upload_file import DocumentSerializable -from quivr_core.packages.embeddings.vectors import Neurons +from quivr_core.api.packages.embeddings.vectors import Neurons if not isinstance(asyncio.get_event_loop(), uvloop.Loop): nest_asyncio.apply() diff --git a/backend/core/quivr_core/api/packages/files/parsers/github.py b/backend/core/quivr_core/api/packages/files/parsers/github.py index f07af638130e..89575499ef8f 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/github.py +++ b/backend/core/quivr_core/api/packages/files/parsers/github.py @@ -6,8 +6,8 @@ from langchain_community.document_loaders import GitLoader from quivr_core.api.models.files import File -from quivr_core.packages.embeddings.vectors import Neurons -from quivr_core.packages.files.file import compute_sha1_from_content +from quivr_core.api.packages.embeddings.vectors import Neurons +from quivr_core.api.packages.files.file import compute_sha1_from_content def process_github( diff --git a/backend/core/quivr_core/api/packages/files/parsers/telegram.py b/backend/core/quivr_core/api/packages/files/parsers/telegram.py index be64655c1f47..c18322510f37 100644 --- a/backend/core/quivr_core/api/packages/files/parsers/telegram.py +++ b/backend/core/quivr_core/api/packages/files/parsers/telegram.py @@ -1,5 +1,5 @@ from quivr_core.api.models.files import File -from quivr_core.packages.files.loaders.telegram import TelegramChatFileLoader +from quivr_core.api.packages.files.loaders.telegram import TelegramChatFileLoader from .common import process_file diff --git a/backend/core/quivr_core/api/packages/quivr_core/utils.py 
b/backend/core/quivr_core/api/packages/quivr_core/utils.py index 101b147725a7..29e1f965c7d8 100644 --- a/backend/core/quivr_core/api/packages/quivr_core/utils.py +++ b/backend/core/quivr_core/api/packages/quivr_core/utils.py @@ -17,13 +17,13 @@ from quivr_core.api.modules.upload.service.generate_file_signed_url import ( generate_file_signed_url, ) -from quivr_core.packages.quivr_core.models import ( +from quivr_core.api.packages.quivr_core.models import ( ParsedRAGChunkResponse, ParsedRAGResponse, RAGResponseMetadata, RawRAGResponse, ) -from quivr_core.packages.quivr_core.prompts import DEFAULT_DOCUMENT_PROMPT +from quivr_core.api.packages.quivr_core.prompts import DEFAULT_DOCUMENT_PROMPT # TODO(@aminediro): define a types packages where we clearly define IO types # This should be used for serialization/deseriallization later diff --git a/backend/core/quivr_core/api/vectorstore/supabase.py b/backend/core/quivr_core/api/vectorstore/supabase.py index 9e017c8955f1..980c3a90a5f6 100644 --- a/backend/core/quivr_core/api/vectorstore/supabase.py +++ b/backend/core/quivr_core/api/vectorstore/supabase.py @@ -4,9 +4,10 @@ from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain_community.vectorstores import SupabaseVectorStore -from quivr_api.logger import get_logger from supabase.client import Client +from quivr_core.api.logger import get_logger + logger = get_logger(__name__) diff --git a/backend/core/quivr_core/storage/local_storage.py b/backend/core/quivr_core/storage/local_storage.py index 3e1a642c1965..cf3e38260016 100644 --- a/backend/core/quivr_core/storage/local_storage.py +++ b/backend/core/quivr_core/storage/local_storage.py @@ -13,6 +13,14 @@ class QuivrFile: __slots__ = ["fd", "filename", "brain_id", "file_size"] + def __init__( + self, fd: BinaryIO, filename: str, brain_id: UUID, file_size: int | None = None + ) -> None: + self.fd = fd + self.brain_id = brain_id + self.filename = filename + self.file_size = file_size + @classmethod def from_path(cls, path: str): fd = os.open(path, os.O_RDONLY) @@ -27,14 +35,6 @@ def from_path(cls, path: str): file_size=file_size, ) - def __init__( - self, fd: BinaryIO, filename: str, brain_id: UUID, file_size: int | None = None - ) -> None: - self.fd = fd - self.brain_id = brain_id - self.filename = filename - self.file_size = file_size - def local_filepath(self) -> str: return os.path.join(str(self.brain_id), self.filename) From 3d302dc1d52d6ee319d8db9d522e45b6512c8927 Mon Sep 17 00:00:00 2001 From: aminediro Date: Mon, 1 Jul 2024 16:05:57 +0200 Subject: [PATCH 19/20] added api deps for core --- backend/core/poetry.lock | 1037 ++++++++++++++++++++++++++++++++++- backend/core/pyproject.toml | 9 + 2 files changed, 1045 insertions(+), 1 deletion(-) diff --git a/backend/core/poetry.lock b/backend/core/poetry.lock index 6994fba464de..577b400a67cf 100644 --- a/backend/core/poetry.lock +++ b/backend/core/poetry.lock @@ -1,5 +1,16 @@ # This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +[[package]] +name = "aiofiles" +version = "24.1.0" +description = "File support for asyncio." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5"}, + {file = "aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c"}, +] + [[package]] name = "aiohttp" version = "3.9.5" @@ -120,6 +131,94 @@ files = [ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] +[[package]] +name = "anyio" +version = "4.4.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.8" +files = [ + {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, + {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, +] + +[package.dependencies] +idna = ">=2.8" +sniffio = ">=1.1" + +[package.extras] +doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +trio = ["trio (>=0.23)"] + +[[package]] +name = "async-timeout" +version = "4.0.3" +description = "Timeout context manager for asyncio programs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, + {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, +] + +[[package]] +name = "asyncpg" +version = "0.29.0" +description = "An asyncio PostgreSQL driver" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "asyncpg-0.29.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72fd0ef9f00aeed37179c62282a3d14262dbbafb74ec0ba16e1b1864d8a12169"}, + {file = "asyncpg-0.29.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:52e8f8f9ff6e21f9b39ca9f8e3e33a5fcdceaf5667a8c5c32bee158e313be385"}, + {file = "asyncpg-0.29.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9e6823a7012be8b68301342ba33b4740e5a166f6bbda0aee32bc01638491a22"}, + {file = "asyncpg-0.29.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:746e80d83ad5d5464cfbf94315eb6744222ab00aa4e522b704322fb182b83610"}, + {file = "asyncpg-0.29.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ff8e8109cd6a46ff852a5e6bab8b0a047d7ea42fcb7ca5ae6eaae97d8eacf397"}, + {file = "asyncpg-0.29.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:97eb024685b1d7e72b1972863de527c11ff87960837919dac6e34754768098eb"}, + {file = "asyncpg-0.29.0-cp310-cp310-win32.whl", hash = "sha256:5bbb7f2cafd8d1fa3e65431833de2642f4b2124be61a449fa064e1a08d27e449"}, + {file = "asyncpg-0.29.0-cp310-cp310-win_amd64.whl", hash = "sha256:76c3ac6530904838a4b650b2880f8e7af938ee049e769ec2fba7cd66469d7772"}, + {file = "asyncpg-0.29.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4900ee08e85af01adb207519bb4e14b1cae8fd21e0ccf80fac6aa60b6da37b4"}, + {file = "asyncpg-0.29.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a65c1dcd820d5aea7c7d82a3fdcb70e096f8f70d1a8bf93eb458e49bfad036ac"}, + {file = "asyncpg-0.29.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:5b52e46f165585fd6af4863f268566668407c76b2c72d366bb8b522fa66f1870"}, + {file = "asyncpg-0.29.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc600ee8ef3dd38b8d67421359779f8ccec30b463e7aec7ed481c8346decf99f"}, + {file = "asyncpg-0.29.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:039a261af4f38f949095e1e780bae84a25ffe3e370175193174eb08d3cecab23"}, + {file = "asyncpg-0.29.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6feaf2d8f9138d190e5ec4390c1715c3e87b37715cd69b2c3dfca616134efd2b"}, + {file = "asyncpg-0.29.0-cp311-cp311-win32.whl", hash = "sha256:1e186427c88225ef730555f5fdda6c1812daa884064bfe6bc462fd3a71c4b675"}, + {file = "asyncpg-0.29.0-cp311-cp311-win_amd64.whl", hash = "sha256:cfe73ffae35f518cfd6e4e5f5abb2618ceb5ef02a2365ce64f132601000587d3"}, + {file = "asyncpg-0.29.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6011b0dc29886ab424dc042bf9eeb507670a3b40aece3439944006aafe023178"}, + {file = "asyncpg-0.29.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b544ffc66b039d5ec5a7454667f855f7fec08e0dfaf5a5490dfafbb7abbd2cfb"}, + {file = "asyncpg-0.29.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d84156d5fb530b06c493f9e7635aa18f518fa1d1395ef240d211cb563c4e2364"}, + {file = "asyncpg-0.29.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54858bc25b49d1114178d65a88e48ad50cb2b6f3e475caa0f0c092d5f527c106"}, + {file = "asyncpg-0.29.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bde17a1861cf10d5afce80a36fca736a86769ab3579532c03e45f83ba8a09c59"}, + {file = "asyncpg-0.29.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:37a2ec1b9ff88d8773d3eb6d3784dc7e3fee7756a5317b67f923172a4748a175"}, + {file = "asyncpg-0.29.0-cp312-cp312-win32.whl", hash = "sha256:bb1292d9fad43112a85e98ecdc2e051602bce97c199920586be83254d9dafc02"}, + {file = "asyncpg-0.29.0-cp312-cp312-win_amd64.whl", hash = "sha256:2245be8ec5047a605e0b454c894e54bf2ec787ac04b1cb7e0d3c67aa1e32f0fe"}, + {file = "asyncpg-0.29.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0009a300cae37b8c525e5b449233d59cd9868fd35431abc470a3e364d2b85cb9"}, + {file = "asyncpg-0.29.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5cad1324dbb33f3ca0cd2074d5114354ed3be2b94d48ddfd88af75ebda7c43cc"}, + {file = "asyncpg-0.29.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:012d01df61e009015944ac7543d6ee30c2dc1eb2f6b10b62a3f598beb6531548"}, + {file = "asyncpg-0.29.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:000c996c53c04770798053e1730d34e30cb645ad95a63265aec82da9093d88e7"}, + {file = "asyncpg-0.29.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e0bfe9c4d3429706cf70d3249089de14d6a01192d617e9093a8e941fea8ee775"}, + {file = "asyncpg-0.29.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:642a36eb41b6313ffa328e8a5c5c2b5bea6ee138546c9c3cf1bffaad8ee36dd9"}, + {file = "asyncpg-0.29.0-cp38-cp38-win32.whl", hash = "sha256:a921372bbd0aa3a5822dd0409da61b4cd50df89ae85150149f8c119f23e8c408"}, + {file = "asyncpg-0.29.0-cp38-cp38-win_amd64.whl", hash = "sha256:103aad2b92d1506700cbf51cd8bb5441e7e72e87a7b3a2ca4e32c840f051a6a3"}, + {file = "asyncpg-0.29.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5340dd515d7e52f4c11ada32171d87c05570479dc01dc66d03ee3e150fb695da"}, + {file = "asyncpg-0.29.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e17b52c6cf83e170d3d865571ba574577ab8e533e7361a2b8ce6157d02c665d3"}, + {file = 
"asyncpg-0.29.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f100d23f273555f4b19b74a96840aa27b85e99ba4b1f18d4ebff0734e78dc090"}, + {file = "asyncpg-0.29.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48e7c58b516057126b363cec8ca02b804644fd012ef8e6c7e23386b7d5e6ce83"}, + {file = "asyncpg-0.29.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f9ea3f24eb4c49a615573724d88a48bd1b7821c890c2effe04f05382ed9e8810"}, + {file = "asyncpg-0.29.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8d36c7f14a22ec9e928f15f92a48207546ffe68bc412f3be718eedccdf10dc5c"}, + {file = "asyncpg-0.29.0-cp39-cp39-win32.whl", hash = "sha256:797ab8123ebaed304a1fad4d7576d5376c3a006a4100380fb9d517f0b59c1ab2"}, + {file = "asyncpg-0.29.0-cp39-cp39-win_amd64.whl", hash = "sha256:cce08a178858b426ae1aa8409b5cc171def45d4293626e7aa6510696d46decd8"}, + {file = "asyncpg-0.29.0.tar.gz", hash = "sha256:d1c49e1f44fffafd9a55e1a9b101590859d881d639ea2922516f5d9c512d354e"}, +] + +[package.dependencies] +async-timeout = {version = ">=4.0.3", markers = "python_version < \"3.12.0\""} + +[package.extras] +docs = ["Sphinx (>=5.3.0,<5.4.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] +test = ["flake8 (>=6.1,<7.0)", "uvloop (>=0.15.3)"] + [[package]] name = "attrs" version = "23.2.0" @@ -249,6 +348,124 @@ files = [ {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, ] +[[package]] +name = "click" +version = "8.1.7" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "deprecation" +version = "2.1.0" +description = "A library to handle automated deprecations" +optional = true +python-versions = "*" +files = [ + {file = "deprecation-2.1.0-py2.py3-none-any.whl", hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a"}, + {file = "deprecation-2.1.0.tar.gz", hash = "sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff"}, +] + +[package.dependencies] +packaging = "*" + +[[package]] +name = "dnspython" +version = "2.6.1" +description = "DNS toolkit" +optional = false +python-versions = ">=3.8" +files = [ + {file = "dnspython-2.6.1-py3-none-any.whl", hash = "sha256:5ef3b9680161f6fa89daf8ad451b5f1a33b18ae8a1c6778cdf4b43f08c0a6e50"}, + {file = "dnspython-2.6.1.tar.gz", hash = "sha256:e8f0f9c23a7b7cb99ded64e6c3a6f3e701d78f50c55e002b839dea7225cff7cc"}, +] + +[package.extras] +dev = ["black (>=23.1.0)", "coverage (>=7.0)", "flake8 (>=7)", "mypy (>=1.8)", "pylint (>=3)", "pytest (>=7.4)", "pytest-cov (>=4.1.0)", "sphinx (>=7.2.0)", "twine (>=4.0.0)", "wheel (>=0.42.0)"] +dnssec = ["cryptography (>=41)"] +doh = ["h2 (>=4.1.0)", "httpcore (>=1.0.0)", "httpx (>=0.26.0)"] +doq = ["aioquic (>=0.9.25)"] +idna = ["idna (>=3.6)"] +trio = ["trio (>=0.23)"] +wmi = ["wmi (>=1.5.1)"] + +[[package]] +name = "email-validator" +version = "2.2.0" +description = "A robust email address syntax and deliverability validation library." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "email_validator-2.2.0-py3-none-any.whl", hash = "sha256:561977c2d73ce3611850a06fa56b414621e0c8faa9d66f2611407d87465da631"}, + {file = "email_validator-2.2.0.tar.gz", hash = "sha256:cb690f344c617a714f22e66ae771445a1ceb46821152df8e165c5f9a364582b7"}, +] + +[package.dependencies] +dnspython = ">=2.0.0" +idna = ">=2.0.0" + +[[package]] +name = "fastapi" +version = "0.111.0" +description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fastapi-0.111.0-py3-none-any.whl", hash = "sha256:97ecbf994be0bcbdadedf88c3150252bed7b2087075ac99735403b1b76cc8fc0"}, + {file = "fastapi-0.111.0.tar.gz", hash = "sha256:b9db9dd147c91cb8b769f7183535773d8741dd46f9dc6676cd82eab510228cd7"}, +] + +[package.dependencies] +email_validator = ">=2.0.0" +fastapi-cli = ">=0.0.2" +httpx = ">=0.23.0" +jinja2 = ">=2.11.2" +orjson = ">=3.2.1" +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" +python-multipart = ">=0.0.7" +starlette = ">=0.37.2,<0.38.0" +typing-extensions = ">=4.8.0" +ujson = ">=4.0.1,<4.0.2 || >4.0.2,<4.1.0 || >4.1.0,<4.2.0 || >4.2.0,<4.3.0 || >4.3.0,<5.0.0 || >5.0.0,<5.1.0 || >5.1.0" +uvicorn = {version = ">=0.12.0", extras = ["standard"]} + +[package.extras] +all = ["email_validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.7)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] + +[[package]] +name = "fastapi-cli" +version = "0.0.4" +description = "Run and manage FastAPI apps from the command line with FastAPI CLI. 
🚀" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fastapi_cli-0.0.4-py3-none-any.whl", hash = "sha256:a2552f3a7ae64058cdbb530be6fa6dbfc975dc165e4fa66d224c3d396e25e809"}, + {file = "fastapi_cli-0.0.4.tar.gz", hash = "sha256:e2e9ffaffc1f7767f488d6da34b6f5a377751c996f397902eb6abb99a67bde32"}, +] + +[package.dependencies] +typer = ">=0.12.3" + +[package.extras] +standard = ["fastapi", "uvicorn[standard] (>=0.15.0)"] + [[package]] name = "frozenlist" version = "1.4.1" @@ -335,6 +552,21 @@ files = [ {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, ] +[[package]] +name = "gotrue" +version = "2.5.4" +description = "Python Client Library for Supabase Auth" +optional = true +python-versions = "<4.0,>=3.8" +files = [ + {file = "gotrue-2.5.4-py3-none-any.whl", hash = "sha256:6f45003bc73cdee612a2d0be79cffed39c91cc8ad43a7440c02c320c7ad03a8e"}, + {file = "gotrue-2.5.4.tar.gz", hash = "sha256:acf0644a2e5d1bd70f66452361bfea4ba9621a0354a13154a333671a4c751c53"}, +] + +[package.dependencies] +httpx = ">=0.23,<0.28" +pydantic = ">=1.10,<3" + [[package]] name = "greenlet" version = "3.0.3" @@ -406,6 +638,110 @@ files = [ docs = ["Sphinx", "furo"] test = ["objgraph", "psutil"] +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "httpcore" +version = "1.0.5" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, + {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<0.26.0)"] + +[[package]] +name = "httptools" +version = "0.6.1" +description = "A collection of framework independent HTTP protocol utils." 
+optional = false +python-versions = ">=3.8.0" +files = [ + {file = "httptools-0.6.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d2f6c3c4cb1948d912538217838f6e9960bc4a521d7f9b323b3da579cd14532f"}, + {file = "httptools-0.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:00d5d4b68a717765b1fabfd9ca755bd12bf44105eeb806c03d1962acd9b8e563"}, + {file = "httptools-0.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:639dc4f381a870c9ec860ce5c45921db50205a37cc3334e756269736ff0aac58"}, + {file = "httptools-0.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e57997ac7fb7ee43140cc03664de5f268813a481dff6245e0075925adc6aa185"}, + {file = "httptools-0.6.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0ac5a0ae3d9f4fe004318d64b8a854edd85ab76cffbf7ef5e32920faef62f142"}, + {file = "httptools-0.6.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3f30d3ce413088a98b9db71c60a6ada2001a08945cb42dd65a9a9fe228627658"}, + {file = "httptools-0.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:1ed99a373e327f0107cb513b61820102ee4f3675656a37a50083eda05dc9541b"}, + {file = "httptools-0.6.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7a7ea483c1a4485c71cb5f38be9db078f8b0e8b4c4dc0210f531cdd2ddac1ef1"}, + {file = "httptools-0.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:85ed077c995e942b6f1b07583e4eb0a8d324d418954fc6af913d36db7c05a5a0"}, + {file = "httptools-0.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b0bb634338334385351a1600a73e558ce619af390c2b38386206ac6a27fecfc"}, + {file = "httptools-0.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d9ceb2c957320def533671fc9c715a80c47025139c8d1f3797477decbc6edd2"}, + {file = "httptools-0.6.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4f0f8271c0a4db459f9dc807acd0eadd4839934a4b9b892f6f160e94da309837"}, + {file = "httptools-0.6.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6a4f5ccead6d18ec072ac0b84420e95d27c1cdf5c9f1bc8fbd8daf86bd94f43d"}, + {file = "httptools-0.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:5cceac09f164bcba55c0500a18fe3c47df29b62353198e4f37bbcc5d591172c3"}, + {file = "httptools-0.6.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:75c8022dca7935cba14741a42744eee13ba05db00b27a4b940f0d646bd4d56d0"}, + {file = "httptools-0.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:48ed8129cd9a0d62cf4d1575fcf90fb37e3ff7d5654d3a5814eb3d55f36478c2"}, + {file = "httptools-0.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f58e335a1402fb5a650e271e8c2d03cfa7cea46ae124649346d17bd30d59c90"}, + {file = "httptools-0.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93ad80d7176aa5788902f207a4e79885f0576134695dfb0fefc15b7a4648d503"}, + {file = "httptools-0.6.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9bb68d3a085c2174c2477eb3ffe84ae9fb4fde8792edb7bcd09a1d8467e30a84"}, + {file = "httptools-0.6.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b512aa728bc02354e5ac086ce76c3ce635b62f5fbc32ab7082b5e582d27867bb"}, + {file = "httptools-0.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:97662ce7fb196c785344d00d638fc9ad69e18ee4bfb4000b35a52efe5adcc949"}, + {file = "httptools-0.6.1-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:8e216a038d2d52ea13fdd9b9c9c7459fb80d78302b257828285eca1c773b99b3"}, + {file = "httptools-0.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3e802e0b2378ade99cd666b5bffb8b2a7cc8f3d28988685dc300469ea8dd86cb"}, + {file = "httptools-0.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4bd3e488b447046e386a30f07af05f9b38d3d368d1f7b4d8f7e10af85393db97"}, + {file = "httptools-0.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe467eb086d80217b7584e61313ebadc8d187a4d95bb62031b7bab4b205c3ba3"}, + {file = "httptools-0.6.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3c3b214ce057c54675b00108ac42bacf2ab8f85c58e3f324a4e963bbc46424f4"}, + {file = "httptools-0.6.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8ae5b97f690badd2ca27cbf668494ee1b6d34cf1c464271ef7bfa9ca6b83ffaf"}, + {file = "httptools-0.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:405784577ba6540fa7d6ff49e37daf104e04f4b4ff2d1ac0469eaa6a20fde084"}, + {file = "httptools-0.6.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:95fb92dd3649f9cb139e9c56604cc2d7c7bf0fc2e7c8d7fbd58f96e35eddd2a3"}, + {file = "httptools-0.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dcbab042cc3ef272adc11220517278519adf8f53fd3056d0e68f0a6f891ba94e"}, + {file = "httptools-0.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cf2372e98406efb42e93bfe10f2948e467edfd792b015f1b4ecd897903d3e8d"}, + {file = "httptools-0.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:678fcbae74477a17d103b7cae78b74800d795d702083867ce160fc202104d0da"}, + {file = "httptools-0.6.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e0b281cf5a125c35f7f6722b65d8542d2e57331be573e9e88bc8b0115c4a7a81"}, + {file = "httptools-0.6.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:95658c342529bba4e1d3d2b1a874db16c7cca435e8827422154c9da76ac4e13a"}, + {file = "httptools-0.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ebaec1bf683e4bf5e9fbb49b8cc36da482033596a415b3e4ebab5a4c0d7ec5e"}, + {file = "httptools-0.6.1.tar.gz", hash = "sha256:c6e26c30455600b95d94b1b836085138e82f177351454ee841c148f93a9bad5a"}, +] + +[package.extras] +test = ["Cython (>=0.29.24,<0.30.0)"] + +[[package]] +name = "httpx" +version = "0.27.0" +description = "The next generation HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, + {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] + [[package]] name = "idna" version = "3.7" @@ -417,6 +753,23 @@ files = [ {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, ] +[[package]] +name = "jinja2" +version = "3.1.4" +description = "A very fast and expressive template engine." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, + {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + [[package]] name = "jsonpatch" version = "1.33" @@ -523,6 +876,110 @@ pydantic = [ ] requests = ">=2,<3" +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" +optional = false +python-versions = ">=3.8" +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + +[[package]] +name = "markupsafe" +version = "2.1.5" +description = "Safely add untrusted strings to HTML/XML markup." +optional = false +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, + {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + [[package]] name = "multidict" version = "6.0.5" @@ -733,6 +1190,23 @@ files = [ {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, ] +[[package]] +name = "postgrest" +version = "0.16.8" +description = "PostgREST client for Python. This library provides an ORM interface to PostgREST." +optional = true +python-versions = "<4.0,>=3.8" +files = [ + {file = "postgrest-0.16.8-py3-none-any.whl", hash = "sha256:c353a24452f51ab9760cf2b884c4b7457a2653ff36444e66b12615bc4cc8e23e"}, + {file = "postgrest-0.16.8.tar.gz", hash = "sha256:7b3802a514dc1e0fc8b5bbdeb2c99af35a0bd910e4ddb17855ca4e3422350c84"}, +] + +[package.dependencies] +deprecation = ">=2.1.0,<3.0.0" +httpx = ">=0.24,<0.28" +pydantic = ">=1.9,<3.0" +strenum = ">=0.4.9,<0.5.0" + [[package]] name = "pydantic" version = "2.7.4" @@ -843,6 +1317,62 @@ files = [ [package.dependencies] typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" +[[package]] +name = "pygments" +version = "2.18.0" +description = "Pygments is a syntax highlighting package written in Python." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, + {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, +] + +[package.extras] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = true +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-dotenv" +version = "1.0.1" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, + {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "python-multipart" +version = "0.0.9" +description = "A streaming multipart parser for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python_multipart-0.0.9-py3-none-any.whl", hash = "sha256:97ca7b8ea7b05f977dc3849c3ba99d51689822fab725c3703af7c866a0c2b215"}, + {file = "python_multipart-0.0.9.tar.gz", hash = "sha256:03f54688c663f1b7977105f021043b0793151e4cb1c1a9d4a11fc13d622c4026"}, +] + +[package.extras] +dev = ["atomicwrites (==1.4.1)", "attrs (==23.2.0)", "coverage (==7.4.1)", "hatch", "invoke (==2.2.0)", "more-itertools (==10.2.0)", "pbr (==6.0.0)", "pluggy (==1.4.0)", "py (==1.11.0)", "pytest (==8.0.0)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.2.0)", "pyyaml (==6.0.1)", "ruff (==0.2.1)"] + [[package]] name = "pyyaml" version = "6.0.1" @@ -903,6 +1433,22 @@ files = [ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, ] +[[package]] +name = "realtime" +version = "1.0.6" +description = "" +optional = true +python-versions = "<4.0,>=3.8" +files = [ + {file = "realtime-1.0.6-py3-none-any.whl", hash = "sha256:c66918a106d8ef348d1821f2dbf6683d8833825580d95b2fdea9995406b42838"}, + {file = "realtime-1.0.6.tar.gz", hash = "sha256:2be0d8a6305513d423604ee319216108fc20105cb7438922d5c8958c48f40a47"}, +] + +[package.dependencies] +python-dateutil = ">=2.8.1,<3.0.0" +typing-extensions = ">=4.12.2,<5.0.0" +websockets = ">=11,<13" + [[package]] name = "requests" version = "2.32.3" @@ -924,6 +1470,57 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] +[[package]] +name = "rich" +version = "13.7.1" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222"}, + {file = "rich-13.7.1.tar.gz", hash = "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"}, +] + +[package.dependencies] +markdown-it-py 
= ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + +[[package]] +name = "shellingham" +version = "1.5.4" +description = "Tool to Detect Surrounding Shell" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, + {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, +] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = true +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + [[package]] name = "sqlalchemy" version = "2.0.31" @@ -1011,6 +1608,103 @@ postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] pymysql = ["pymysql"] sqlcipher = ["sqlcipher3_binary"] +[[package]] +name = "sqlmodel" +version = "0.0.19" +description = "SQLModel, SQL databases in Python, designed for simplicity, compatibility, and robustness." +optional = false +python-versions = ">=3.7" +files = [ + {file = "sqlmodel-0.0.19-py3-none-any.whl", hash = "sha256:6c8125d4101970d031e9aae970b20cbeaf44149989f8366d939f4ab21aab8763"}, + {file = "sqlmodel-0.0.19.tar.gz", hash = "sha256:95449b0b48a40a3eecf0a629fa5735b9dfc8a5574a91090d24ca17f02246ad96"}, +] + +[package.dependencies] +pydantic = ">=1.10.13,<3.0.0" +SQLAlchemy = ">=2.0.14,<2.1.0" + +[[package]] +name = "starlette" +version = "0.37.2" +description = "The little ASGI library that shines." +optional = false +python-versions = ">=3.8" +files = [ + {file = "starlette-0.37.2-py3-none-any.whl", hash = "sha256:6fe59f29268538e5d0d182f2791a479a0c64638e6935d1c6989e63fb2699c6ee"}, + {file = "starlette-0.37.2.tar.gz", hash = "sha256:9af890290133b79fc3db55474ade20f6220a364a0402e0b556e7cd5e1e093823"}, +] + +[package.dependencies] +anyio = ">=3.4.0,<5" + +[package.extras] +full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7)", "pyyaml"] + +[[package]] +name = "storage3" +version = "0.7.6" +description = "Supabase Storage client for Python." +optional = true +python-versions = "<4.0,>=3.8" +files = [ + {file = "storage3-0.7.6-py3-none-any.whl", hash = "sha256:d8c23bf87b3a88cafb03761b7f936e4e49daca67741d571513edf746e0f8ba72"}, + {file = "storage3-0.7.6.tar.gz", hash = "sha256:0b7781cea7fe6382e6b9349b84395808c5f4203dfcac31478304eedc2f81acf6"}, +] + +[package.dependencies] +httpx = ">=0.24,<0.28" +python-dateutil = ">=2.8.2,<3.0.0" +typing-extensions = ">=4.2.0,<5.0.0" + +[[package]] +name = "strenum" +version = "0.4.15" +description = "An Enum that inherits from str." 
+optional = true +python-versions = "*" +files = [ + {file = "StrEnum-0.4.15-py3-none-any.whl", hash = "sha256:a30cda4af7cc6b5bf52c8055bc4bf4b2b6b14a93b574626da33df53cf7740659"}, + {file = "StrEnum-0.4.15.tar.gz", hash = "sha256:878fb5ab705442070e4dd1929bb5e2249511c0bcf2b0eeacf3bcd80875c82eff"}, +] + +[package.extras] +docs = ["myst-parser[linkify]", "sphinx", "sphinx-rtd-theme"] +release = ["twine"] +test = ["pylint", "pytest", "pytest-black", "pytest-cov", "pytest-pylint"] + +[[package]] +name = "supabase" +version = "2.5.1" +description = "Supabase client for Python." +optional = true +python-versions = "<4.0,>=3.8" +files = [ + {file = "supabase-2.5.1-py3-none-any.whl", hash = "sha256:74a1f24f04fede1967ef084b50dea688228f7b10eb2f9d73350fe2251a865188"}, + {file = "supabase-2.5.1.tar.gz", hash = "sha256:c50e0eba5b03de3abd5ac0f887957ca43558ba44c4d17bb44e73ec454b41734c"}, +] + +[package.dependencies] +gotrue = ">=1.3,<3.0" +httpx = ">=0.24,<0.28" +postgrest = ">=0.14,<0.17.0" +realtime = ">=1.0.0,<2.0.0" +storage3 = ">=0.5.3,<0.8.0" +supafunc = ">=0.3.1,<0.5.0" + +[[package]] +name = "supafunc" +version = "0.4.6" +description = "Library for Supabase Functions" +optional = true +python-versions = "<4.0,>=3.8" +files = [ + {file = "supafunc-0.4.6-py3-none-any.whl", hash = "sha256:f7ca7b244365e171da7055a64edb462c2ec449cdaa210fc418cfccd132f4cf98"}, + {file = "supafunc-0.4.6.tar.gz", hash = "sha256:92db51f8f8568d1430285219c9c0072e44207409c416622d7387f609e31928a6"}, +] + +[package.dependencies] +httpx = ">=0.24,<0.28" + [[package]] name = "tenacity" version = "8.4.2" @@ -1026,6 +1720,23 @@ files = [ doc = ["reno", "sphinx"] test = ["pytest", "tornado (>=4.5)", "typeguard"] +[[package]] +name = "typer" +version = "0.12.3" +description = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "typer-0.12.3-py3-none-any.whl", hash = "sha256:070d7ca53f785acbccba8e7d28b08dcd88f79f1fbda035ade0aecec71ca5c914"}, + {file = "typer-0.12.3.tar.gz", hash = "sha256:49e73131481d804288ef62598d97a1ceef3058905aa536a1134f90891ba35482"}, +] + +[package.dependencies] +click = ">=8.0.0" +rich = ">=10.11.0" +shellingham = ">=1.3.0" +typing-extensions = ">=3.7.4.3" + [[package]] name = "typing-extensions" version = "4.12.2" @@ -1037,6 +1748,93 @@ files = [ {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] +[[package]] +name = "ujson" +version = "5.10.0" +description = "Ultra fast JSON encoder and decoder for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "ujson-5.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2601aa9ecdbee1118a1c2065323bda35e2c5a2cf0797ef4522d485f9d3ef65bd"}, + {file = "ujson-5.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:348898dd702fc1c4f1051bc3aacbf894caa0927fe2c53e68679c073375f732cf"}, + {file = "ujson-5.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22cffecf73391e8abd65ef5f4e4dd523162a3399d5e84faa6aebbf9583df86d6"}, + {file = "ujson-5.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26b0e2d2366543c1bb4fbd457446f00b0187a2bddf93148ac2da07a53fe51569"}, + {file = "ujson-5.10.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:caf270c6dba1be7a41125cd1e4fc7ba384bf564650beef0df2dd21a00b7f5770"}, + {file = "ujson-5.10.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a245d59f2ffe750446292b0094244df163c3dc96b3ce152a2c837a44e7cda9d1"}, + {file = "ujson-5.10.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:94a87f6e151c5f483d7d54ceef83b45d3a9cca7a9cb453dbdbb3f5a6f64033f5"}, + {file = "ujson-5.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:29b443c4c0a113bcbb792c88bea67b675c7ca3ca80c3474784e08bba01c18d51"}, + {file = "ujson-5.10.0-cp310-cp310-win32.whl", hash = "sha256:c18610b9ccd2874950faf474692deee4223a994251bc0a083c114671b64e6518"}, + {file = "ujson-5.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:924f7318c31874d6bb44d9ee1900167ca32aa9b69389b98ecbde34c1698a250f"}, + {file = "ujson-5.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a5b366812c90e69d0f379a53648be10a5db38f9d4ad212b60af00bd4048d0f00"}, + {file = "ujson-5.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:502bf475781e8167f0f9d0e41cd32879d120a524b22358e7f205294224c71126"}, + {file = "ujson-5.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b91b5d0d9d283e085e821651184a647699430705b15bf274c7896f23fe9c9d8"}, + {file = "ujson-5.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:129e39af3a6d85b9c26d5577169c21d53821d8cf68e079060602e861c6e5da1b"}, + {file = "ujson-5.10.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f77b74475c462cb8b88680471193064d3e715c7c6074b1c8c412cb526466efe9"}, + {file = "ujson-5.10.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7ec0ca8c415e81aa4123501fee7f761abf4b7f386aad348501a26940beb1860f"}, + {file = "ujson-5.10.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ab13a2a9e0b2865a6c6db9271f4b46af1c7476bfd51af1f64585e919b7c07fd4"}, + {file = "ujson-5.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:57aaf98b92d72fc70886b5a0e1a1ca52c2320377360341715dd3933a18e827b1"}, + {file = "ujson-5.10.0-cp311-cp311-win32.whl", hash = "sha256:2987713a490ceb27edff77fb184ed09acdc565db700ee852823c3dc3cffe455f"}, + {file = "ujson-5.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:f00ea7e00447918ee0eff2422c4add4c5752b1b60e88fcb3c067d4a21049a720"}, + {file = "ujson-5.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:98ba15d8cbc481ce55695beee9f063189dce91a4b08bc1d03e7f0152cd4bbdd5"}, + {file = "ujson-5.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a9d2edbf1556e4f56e50fab7d8ff993dbad7f54bac68eacdd27a8f55f433578e"}, + {file = "ujson-5.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6627029ae4f52d0e1a2451768c2c37c0c814ffc04f796eb36244cf16b8e57043"}, + {file = "ujson-5.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8ccb77b3e40b151e20519c6ae6d89bfe3f4c14e8e210d910287f778368bb3d1"}, + {file = "ujson-5.10.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3caf9cd64abfeb11a3b661329085c5e167abbe15256b3b68cb5d914ba7396f3"}, + {file = "ujson-5.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6e32abdce572e3a8c3d02c886c704a38a1b015a1fb858004e03d20ca7cecbb21"}, + {file = "ujson-5.10.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a65b6af4d903103ee7b6f4f5b85f1bfd0c90ba4eeac6421aae436c9988aa64a2"}, + {file = "ujson-5.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:604a046d966457b6cdcacc5aa2ec5314f0e8c42bae52842c1e6fa02ea4bda42e"}, + {file = "ujson-5.10.0-cp312-cp312-win32.whl", hash = "sha256:6dea1c8b4fc921bf78a8ff00bbd2bfe166345f5536c510671bccececb187c80e"}, + {file = "ujson-5.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:38665e7d8290188b1e0d57d584eb8110951a9591363316dd41cf8686ab1d0abc"}, + {file = "ujson-5.10.0-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:618efd84dc1acbd6bff8eaa736bb6c074bfa8b8a98f55b61c38d4ca2c1f7f287"}, + {file = "ujson-5.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:38d5d36b4aedfe81dfe251f76c0467399d575d1395a1755de391e58985ab1c2e"}, + {file = "ujson-5.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67079b1f9fb29ed9a2914acf4ef6c02844b3153913eb735d4bf287ee1db6e557"}, + {file = "ujson-5.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7d0e0ceeb8fe2468c70ec0c37b439dd554e2aa539a8a56365fd761edb418988"}, + {file = "ujson-5.10.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:59e02cd37bc7c44d587a0ba45347cc815fb7a5fe48de16bf05caa5f7d0d2e816"}, + {file = "ujson-5.10.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2a890b706b64e0065f02577bf6d8ca3b66c11a5e81fb75d757233a38c07a1f20"}, + {file = "ujson-5.10.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:621e34b4632c740ecb491efc7f1fcb4f74b48ddb55e65221995e74e2d00bbff0"}, + {file = "ujson-5.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b9500e61fce0cfc86168b248104e954fead61f9be213087153d272e817ec7b4f"}, + {file = "ujson-5.10.0-cp313-cp313-win32.whl", hash = "sha256:4c4fc16f11ac1612f05b6f5781b384716719547e142cfd67b65d035bd85af165"}, + {file = "ujson-5.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:4573fd1695932d4f619928fd09d5d03d917274381649ade4328091ceca175539"}, + {file = "ujson-5.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a984a3131da7f07563057db1c3020b1350a3e27a8ec46ccbfbf21e5928a43050"}, + {file = 
"ujson-5.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:73814cd1b9db6fc3270e9d8fe3b19f9f89e78ee9d71e8bd6c9a626aeaeaf16bd"}, + {file = "ujson-5.10.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:61e1591ed9376e5eddda202ec229eddc56c612b61ac6ad07f96b91460bb6c2fb"}, + {file = "ujson-5.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2c75269f8205b2690db4572a4a36fe47cd1338e4368bc73a7a0e48789e2e35a"}, + {file = "ujson-5.10.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7223f41e5bf1f919cd8d073e35b229295aa8e0f7b5de07ed1c8fddac63a6bc5d"}, + {file = "ujson-5.10.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d4dc2fd6b3067c0782e7002ac3b38cf48608ee6366ff176bbd02cf969c9c20fe"}, + {file = "ujson-5.10.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:232cc85f8ee3c454c115455195a205074a56ff42608fd6b942aa4c378ac14dd7"}, + {file = "ujson-5.10.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:cc6139531f13148055d691e442e4bc6601f6dba1e6d521b1585d4788ab0bfad4"}, + {file = "ujson-5.10.0-cp38-cp38-win32.whl", hash = "sha256:e7ce306a42b6b93ca47ac4a3b96683ca554f6d35dd8adc5acfcd55096c8dfcb8"}, + {file = "ujson-5.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:e82d4bb2138ab05e18f089a83b6564fee28048771eb63cdecf4b9b549de8a2cc"}, + {file = "ujson-5.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dfef2814c6b3291c3c5f10065f745a1307d86019dbd7ea50e83504950136ed5b"}, + {file = "ujson-5.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4734ee0745d5928d0ba3a213647f1c4a74a2a28edc6d27b2d6d5bd9fa4319e27"}, + {file = "ujson-5.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d47ebb01bd865fdea43da56254a3930a413f0c5590372a1241514abae8aa7c76"}, + {file = "ujson-5.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dee5e97c2496874acbf1d3e37b521dd1f307349ed955e62d1d2f05382bc36dd5"}, + {file = "ujson-5.10.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7490655a2272a2d0b072ef16b0b58ee462f4973a8f6bbe64917ce5e0a256f9c0"}, + {file = "ujson-5.10.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ba17799fcddaddf5c1f75a4ba3fd6441f6a4f1e9173f8a786b42450851bd74f1"}, + {file = "ujson-5.10.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2aff2985cef314f21d0fecc56027505804bc78802c0121343874741650a4d3d1"}, + {file = "ujson-5.10.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ad88ac75c432674d05b61184178635d44901eb749786c8eb08c102330e6e8996"}, + {file = "ujson-5.10.0-cp39-cp39-win32.whl", hash = "sha256:2544912a71da4ff8c4f7ab5606f947d7299971bdd25a45e008e467ca638d13c9"}, + {file = "ujson-5.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:3ff201d62b1b177a46f113bb43ad300b424b7847f9c5d38b1b4ad8f75d4a282a"}, + {file = "ujson-5.10.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5b6fee72fa77dc172a28f21693f64d93166534c263adb3f96c413ccc85ef6e64"}, + {file = "ujson-5.10.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:61d0af13a9af01d9f26d2331ce49bb5ac1fb9c814964018ac8df605b5422dcb3"}, + {file = "ujson-5.10.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecb24f0bdd899d368b715c9e6664166cf694d1e57be73f17759573a6986dd95a"}, + {file = "ujson-5.10.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fbd8fd427f57a03cff3ad6574b5e299131585d9727c8c366da4624a9069ed746"}, + {file = 
"ujson-5.10.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:beeaf1c48e32f07d8820c705ff8e645f8afa690cca1544adba4ebfa067efdc88"}, + {file = "ujson-5.10.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:baed37ea46d756aca2955e99525cc02d9181de67f25515c468856c38d52b5f3b"}, + {file = "ujson-5.10.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7663960f08cd5a2bb152f5ee3992e1af7690a64c0e26d31ba7b3ff5b2ee66337"}, + {file = "ujson-5.10.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:d8640fb4072d36b08e95a3a380ba65779d356b2fee8696afeb7794cf0902d0a1"}, + {file = "ujson-5.10.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78778a3aa7aafb11e7ddca4e29f46bc5139131037ad628cc10936764282d6753"}, + {file = "ujson-5.10.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0111b27f2d5c820e7f2dbad7d48e3338c824e7ac4d2a12da3dc6061cc39c8e6"}, + {file = "ujson-5.10.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:c66962ca7565605b355a9ed478292da628b8f18c0f2793021ca4425abf8b01e5"}, + {file = "ujson-5.10.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ba43cc34cce49cf2d4bc76401a754a81202d8aa926d0e2b79f0ee258cb15d3a4"}, + {file = "ujson-5.10.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ac56eb983edce27e7f51d05bc8dd820586c6e6be1c5216a6809b0c668bb312b8"}, + {file = "ujson-5.10.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f44bd4b23a0e723bf8b10628288c2c7c335161d6840013d4d5de20e48551773b"}, + {file = "ujson-5.10.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c10f4654e5326ec14a46bcdeb2b685d4ada6911050aa8baaf3501e57024b804"}, + {file = "ujson-5.10.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0de4971a89a762398006e844ae394bd46991f7c385d7a6a3b93ba229e6dac17e"}, + {file = "ujson-5.10.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e1402f0564a97d2a52310ae10a64d25bcef94f8dd643fcf5d310219d915484f7"}, + {file = "ujson-5.10.0.tar.gz", hash = "sha256:b3cd8f3c5d8c7738257f1018880444f7b7d9b66232c64649f562d7ba86ad4bc1"}, +] + [[package]] name = "urllib3" version = "2.2.2" @@ -1054,6 +1852,243 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "uvicorn" +version = "0.30.1" +description = "The lightning-fast ASGI server." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "uvicorn-0.30.1-py3-none-any.whl", hash = "sha256:cd17daa7f3b9d7a24de3617820e634d0933b69eed8e33a516071174427238c81"}, + {file = "uvicorn-0.30.1.tar.gz", hash = "sha256:d46cd8e0fd80240baffbcd9ec1012a712938754afcf81bce56c024c1656aece8"}, +] + +[package.dependencies] +click = ">=7.0" +colorama = {version = ">=0.4", optional = true, markers = "sys_platform == \"win32\" and extra == \"standard\""} +h11 = ">=0.8" +httptools = {version = ">=0.5.0", optional = true, markers = "extra == \"standard\""} +python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} +pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""} +uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "(sys_platform != \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\" and extra == \"standard\""} +watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} +websockets = {version = ">=10.4", optional = true, markers = "extra == \"standard\""} + +[package.extras] +standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] + +[[package]] +name = "uvloop" +version = "0.19.0" +description = "Fast implementation of asyncio event loop on top of libuv" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "uvloop-0.19.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:de4313d7f575474c8f5a12e163f6d89c0a878bc49219641d49e6f1444369a90e"}, + {file = "uvloop-0.19.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5588bd21cf1fcf06bded085f37e43ce0e00424197e7c10e77afd4bbefffef428"}, + {file = "uvloop-0.19.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b1fd71c3843327f3bbc3237bedcdb6504fd50368ab3e04d0410e52ec293f5b8"}, + {file = "uvloop-0.19.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a05128d315e2912791de6088c34136bfcdd0c7cbc1cf85fd6fd1bb321b7c849"}, + {file = "uvloop-0.19.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:cd81bdc2b8219cb4b2556eea39d2e36bfa375a2dd021404f90a62e44efaaf957"}, + {file = "uvloop-0.19.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5f17766fb6da94135526273080f3455a112f82570b2ee5daa64d682387fe0dcd"}, + {file = "uvloop-0.19.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4ce6b0af8f2729a02a5d1575feacb2a94fc7b2e983868b009d51c9a9d2149bef"}, + {file = "uvloop-0.19.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:31e672bb38b45abc4f26e273be83b72a0d28d074d5b370fc4dcf4c4eb15417d2"}, + {file = "uvloop-0.19.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:570fc0ed613883d8d30ee40397b79207eedd2624891692471808a95069a007c1"}, + {file = "uvloop-0.19.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5138821e40b0c3e6c9478643b4660bd44372ae1e16a322b8fc07478f92684e24"}, + {file = "uvloop-0.19.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:91ab01c6cd00e39cde50173ba4ec68a1e578fee9279ba64f5221810a9e786533"}, + {file = "uvloop-0.19.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:47bf3e9312f63684efe283f7342afb414eea4d3011542155c7e625cd799c3b12"}, + {file = "uvloop-0.19.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:da8435a3bd498419ee8c13c34b89b5005130a476bda1d6ca8cfdde3de35cd650"}, + {file = 
"uvloop-0.19.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:02506dc23a5d90e04d4f65c7791e65cf44bd91b37f24cfc3ef6cf2aff05dc7ec"}, + {file = "uvloop-0.19.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2693049be9d36fef81741fddb3f441673ba12a34a704e7b4361efb75cf30befc"}, + {file = "uvloop-0.19.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7010271303961c6f0fe37731004335401eb9075a12680738731e9c92ddd96ad6"}, + {file = "uvloop-0.19.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:5daa304d2161d2918fa9a17d5635099a2f78ae5b5960e742b2fcfbb7aefaa593"}, + {file = "uvloop-0.19.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7207272c9520203fea9b93843bb775d03e1cf88a80a936ce760f60bb5add92f3"}, + {file = "uvloop-0.19.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:78ab247f0b5671cc887c31d33f9b3abfb88d2614b84e4303f1a63b46c046c8bd"}, + {file = "uvloop-0.19.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:472d61143059c84947aa8bb74eabbace30d577a03a1805b77933d6bd13ddebbd"}, + {file = "uvloop-0.19.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45bf4c24c19fb8a50902ae37c5de50da81de4922af65baf760f7c0c42e1088be"}, + {file = "uvloop-0.19.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:271718e26b3e17906b28b67314c45d19106112067205119dddbd834c2b7ce797"}, + {file = "uvloop-0.19.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:34175c9fd2a4bc3adc1380e1261f60306344e3407c20a4d684fd5f3be010fa3d"}, + {file = "uvloop-0.19.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e27f100e1ff17f6feeb1f33968bc185bf8ce41ca557deee9d9bbbffeb72030b7"}, + {file = "uvloop-0.19.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:13dfdf492af0aa0a0edf66807d2b465607d11c4fa48f4a1fd41cbea5b18e8e8b"}, + {file = "uvloop-0.19.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6e3d4e85ac060e2342ff85e90d0c04157acb210b9ce508e784a944f852a40e67"}, + {file = "uvloop-0.19.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ca4956c9ab567d87d59d49fa3704cf29e37109ad348f2d5223c9bf761a332e7"}, + {file = "uvloop-0.19.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f467a5fd23b4fc43ed86342641f3936a68ded707f4627622fa3f82a120e18256"}, + {file = "uvloop-0.19.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:492e2c32c2af3f971473bc22f086513cedfc66a130756145a931a90c3958cb17"}, + {file = "uvloop-0.19.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2df95fca285a9f5bfe730e51945ffe2fa71ccbfdde3b0da5772b4ee4f2e770d5"}, + {file = "uvloop-0.19.0.tar.gz", hash = "sha256:0246f4fd1bf2bf702e06b0d45ee91677ee5c31242f39aab4ea6fe0c51aedd0fd"}, +] + +[package.extras] +docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] +test = ["Cython (>=0.29.36,<0.30.0)", "aiohttp (==3.9.0b0)", "aiohttp (>=3.8.1)", "flake8 (>=5.0,<6.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=23.0.0,<23.1.0)", "pycodestyle (>=2.9.0,<2.10.0)"] + +[[package]] +name = "watchfiles" +version = "0.22.0" +description = "Simple, modern and high performance file watching and code reload in python." 
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "watchfiles-0.22.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:da1e0a8caebf17976e2ffd00fa15f258e14749db5e014660f53114b676e68538"},
+    {file = "watchfiles-0.22.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:61af9efa0733dc4ca462347becb82e8ef4945aba5135b1638bfc20fad64d4f0e"},
+    {file = "watchfiles-0.22.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d9188979a58a096b6f8090e816ccc3f255f137a009dd4bbec628e27696d67c1"},
+    {file = "watchfiles-0.22.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2bdadf6b90c099ca079d468f976fd50062905d61fae183f769637cb0f68ba59a"},
+    {file = "watchfiles-0.22.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:067dea90c43bf837d41e72e546196e674f68c23702d3ef80e4e816937b0a3ffd"},
+    {file = "watchfiles-0.22.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbf8a20266136507abf88b0df2328e6a9a7c7309e8daff124dda3803306a9fdb"},
+    {file = "watchfiles-0.22.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1235c11510ea557fe21be5d0e354bae2c655a8ee6519c94617fe63e05bca4171"},
+    {file = "watchfiles-0.22.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2444dc7cb9d8cc5ab88ebe792a8d75709d96eeef47f4c8fccb6df7c7bc5be71"},
+    {file = "watchfiles-0.22.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c5af2347d17ab0bd59366db8752d9e037982e259cacb2ba06f2c41c08af02c39"},
+    {file = "watchfiles-0.22.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9624a68b96c878c10437199d9a8b7d7e542feddda8d5ecff58fdc8e67b460848"},
+    {file = "watchfiles-0.22.0-cp310-none-win32.whl", hash = "sha256:4b9f2a128a32a2c273d63eb1fdbf49ad64852fc38d15b34eaa3f7ca2f0d2b797"},
+    {file = "watchfiles-0.22.0-cp310-none-win_amd64.whl", hash = "sha256:2627a91e8110b8de2406d8b2474427c86f5a62bf7d9ab3654f541f319ef22bcb"},
+    {file = "watchfiles-0.22.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8c39987a1397a877217be1ac0fb1d8b9f662c6077b90ff3de2c05f235e6a8f96"},
+    {file = "watchfiles-0.22.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a927b3034d0672f62fb2ef7ea3c9fc76d063c4b15ea852d1db2dc75fe2c09696"},
+    {file = "watchfiles-0.22.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:052d668a167e9fc345c24203b104c313c86654dd6c0feb4b8a6dfc2462239249"},
+    {file = "watchfiles-0.22.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e45fb0d70dda1623a7045bd00c9e036e6f1f6a85e4ef2c8ae602b1dfadf7550"},
+    {file = "watchfiles-0.22.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c49b76a78c156979759d759339fb62eb0549515acfe4fd18bb151cc07366629c"},
+    {file = "watchfiles-0.22.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4a65474fd2b4c63e2c18ac67a0c6c66b82f4e73e2e4d940f837ed3d2fd9d4da"},
+    {file = "watchfiles-0.22.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1cc0cba54f47c660d9fa3218158b8963c517ed23bd9f45fe463f08262a4adae1"},
+    {file = "watchfiles-0.22.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94ebe84a035993bb7668f58a0ebf998174fb723a39e4ef9fce95baabb42b787f"},
+    {file = "watchfiles-0.22.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e0f0a874231e2839abbf473256efffe577d6ee2e3bfa5b540479e892e47c172d"},
+    {file = "watchfiles-0.22.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:213792c2cd3150b903e6e7884d40660e0bcec4465e00563a5fc03f30ea9c166c"},
+    {file = "watchfiles-0.22.0-cp311-none-win32.whl", hash = "sha256:b44b70850f0073b5fcc0b31ede8b4e736860d70e2dbf55701e05d3227a154a67"},
+    {file = "watchfiles-0.22.0-cp311-none-win_amd64.whl", hash = "sha256:00f39592cdd124b4ec5ed0b1edfae091567c72c7da1487ae645426d1b0ffcad1"},
+    {file = "watchfiles-0.22.0-cp311-none-win_arm64.whl", hash = "sha256:3218a6f908f6a276941422b035b511b6d0d8328edd89a53ae8c65be139073f84"},
+    {file = "watchfiles-0.22.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:c7b978c384e29d6c7372209cbf421d82286a807bbcdeb315427687f8371c340a"},
+    {file = "watchfiles-0.22.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bd4c06100bce70a20c4b81e599e5886cf504c9532951df65ad1133e508bf20be"},
+    {file = "watchfiles-0.22.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:425440e55cd735386ec7925f64d5dde392e69979d4c8459f6bb4e920210407f2"},
+    {file = "watchfiles-0.22.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:68fe0c4d22332d7ce53ad094622b27e67440dacefbaedd29e0794d26e247280c"},
+    {file = "watchfiles-0.22.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a8a31bfd98f846c3c284ba694c6365620b637debdd36e46e1859c897123aa232"},
+    {file = "watchfiles-0.22.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc2e8fe41f3cac0660197d95216c42910c2b7e9c70d48e6d84e22f577d106fc1"},
+    {file = "watchfiles-0.22.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55b7cc10261c2786c41d9207193a85c1db1b725cf87936df40972aab466179b6"},
+    {file = "watchfiles-0.22.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28585744c931576e535860eaf3f2c0ec7deb68e3b9c5a85ca566d69d36d8dd27"},
+    {file = "watchfiles-0.22.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:00095dd368f73f8f1c3a7982a9801190cc88a2f3582dd395b289294f8975172b"},
+    {file = "watchfiles-0.22.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:52fc9b0dbf54d43301a19b236b4a4614e610605f95e8c3f0f65c3a456ffd7d35"},
+    {file = "watchfiles-0.22.0-cp312-none-win32.whl", hash = "sha256:581f0a051ba7bafd03e17127735d92f4d286af941dacf94bcf823b101366249e"},
+    {file = "watchfiles-0.22.0-cp312-none-win_amd64.whl", hash = "sha256:aec83c3ba24c723eac14225194b862af176d52292d271c98820199110e31141e"},
+    {file = "watchfiles-0.22.0-cp312-none-win_arm64.whl", hash = "sha256:c668228833c5619f6618699a2c12be057711b0ea6396aeaece4ded94184304ea"},
+    {file = "watchfiles-0.22.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d47e9ef1a94cc7a536039e46738e17cce058ac1593b2eccdede8bf72e45f372a"},
+    {file = "watchfiles-0.22.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:28f393c1194b6eaadcdd8f941307fc9bbd7eb567995232c830f6aef38e8a6e88"},
+    {file = "watchfiles-0.22.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd64f3a4db121bc161644c9e10a9acdb836853155a108c2446db2f5ae1778c3d"},
+    {file = "watchfiles-0.22.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2abeb79209630da981f8ebca30a2c84b4c3516a214451bfc5f106723c5f45843"},
+    {file = "watchfiles-0.22.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4cc382083afba7918e32d5ef12321421ef43d685b9a67cc452a6e6e18920890e"},
+    {file = "watchfiles-0.22.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d048ad5d25b363ba1d19f92dcf29023988524bee6f9d952130b316c5802069cb"},
+    {file = "watchfiles-0.22.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:103622865599f8082f03af4214eaff90e2426edff5e8522c8f9e93dc17caee13"},
+    {file = "watchfiles-0.22.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3e1f3cf81f1f823e7874ae563457828e940d75573c8fbf0ee66818c8b6a9099"},
+    {file = "watchfiles-0.22.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8597b6f9dc410bdafc8bb362dac1cbc9b4684a8310e16b1ff5eee8725d13dcd6"},
+    {file = "watchfiles-0.22.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0b04a2cbc30e110303baa6d3ddce8ca3664bc3403be0f0ad513d1843a41c97d1"},
+    {file = "watchfiles-0.22.0-cp38-none-win32.whl", hash = "sha256:b610fb5e27825b570554d01cec427b6620ce9bd21ff8ab775fc3a32f28bba63e"},
+    {file = "watchfiles-0.22.0-cp38-none-win_amd64.whl", hash = "sha256:fe82d13461418ca5e5a808a9e40f79c1879351fcaeddbede094028e74d836e86"},
+    {file = "watchfiles-0.22.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:3973145235a38f73c61474d56ad6199124e7488822f3a4fc97c72009751ae3b0"},
+    {file = "watchfiles-0.22.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:280a4afbc607cdfc9571b9904b03a478fc9f08bbeec382d648181c695648202f"},
+    {file = "watchfiles-0.22.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a0d883351a34c01bd53cfa75cd0292e3f7e268bacf2f9e33af4ecede7e21d1d"},
+    {file = "watchfiles-0.22.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9165bcab15f2b6d90eedc5c20a7f8a03156b3773e5fb06a790b54ccecdb73385"},
+    {file = "watchfiles-0.22.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc1b9b56f051209be458b87edb6856a449ad3f803315d87b2da4c93b43a6fe72"},
+    {file = "watchfiles-0.22.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8dc1fc25a1dedf2dd952909c8e5cb210791e5f2d9bc5e0e8ebc28dd42fed7562"},
+    {file = "watchfiles-0.22.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc92d2d2706d2b862ce0568b24987eba51e17e14b79a1abcd2edc39e48e743c8"},
+    {file = "watchfiles-0.22.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97b94e14b88409c58cdf4a8eaf0e67dfd3ece7e9ce7140ea6ff48b0407a593ec"},
+    {file = "watchfiles-0.22.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:96eec15e5ea7c0b6eb5bfffe990fc7c6bd833acf7e26704eb18387fb2f5fd087"},
+    {file = "watchfiles-0.22.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:28324d6b28bcb8d7c1041648d7b63be07a16db5510bea923fc80b91a2a6cbed6"},
+    {file = "watchfiles-0.22.0-cp39-none-win32.whl", hash = "sha256:8c3e3675e6e39dc59b8fe5c914a19d30029e36e9f99468dddffd432d8a7b1c93"},
+    {file = "watchfiles-0.22.0-cp39-none-win_amd64.whl", hash = "sha256:25c817ff2a86bc3de3ed2df1703e3d24ce03479b27bb4527c57e722f8554d971"},
+    {file = "watchfiles-0.22.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b810a2c7878cbdecca12feae2c2ae8af59bea016a78bc353c184fa1e09f76b68"},
+    {file = "watchfiles-0.22.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:f7e1f9c5d1160d03b93fc4b68a0aeb82fe25563e12fbcdc8507f8434ab6f823c"},
+    {file = "watchfiles-0.22.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:030bc4e68d14bcad2294ff68c1ed87215fbd9a10d9dea74e7cfe8a17869785ab"},
+    {file = "watchfiles-0.22.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ace7d060432acde5532e26863e897ee684780337afb775107c0a90ae8dbccfd2"},
+    {file = "watchfiles-0.22.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5834e1f8b71476a26df97d121c0c0ed3549d869124ed2433e02491553cb468c2"},
+    {file = "watchfiles-0.22.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:0bc3b2f93a140df6806c8467c7f51ed5e55a931b031b5c2d7ff6132292e803d6"},
+    {file = "watchfiles-0.22.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8fdebb655bb1ba0122402352b0a4254812717a017d2dc49372a1d47e24073795"},
+    {file = "watchfiles-0.22.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c8e0aa0e8cc2a43561e0184c0513e291ca891db13a269d8d47cb9841ced7c71"},
+    {file = "watchfiles-0.22.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2f350cbaa4bb812314af5dab0eb8d538481e2e2279472890864547f3fe2281ed"},
+    {file = "watchfiles-0.22.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7a74436c415843af2a769b36bf043b6ccbc0f8d784814ba3d42fc961cdb0a9dc"},
+    {file = "watchfiles-0.22.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00ad0bcd399503a84cc688590cdffbe7a991691314dde5b57b3ed50a41319a31"},
+    {file = "watchfiles-0.22.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72a44e9481afc7a5ee3291b09c419abab93b7e9c306c9ef9108cb76728ca58d2"},
+    {file = "watchfiles-0.22.0.tar.gz", hash = "sha256:988e981aaab4f3955209e7e28c7794acdb690be1efa7f16f8ea5aba7ffdadacb"},
+]
+
+[package.dependencies]
+anyio = ">=3.0.0"
+
+[[package]]
+name = "websockets"
+version = "12.0"
+description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "websockets-12.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d554236b2a2006e0ce16315c16eaa0d628dab009c33b63ea03f41c6107958374"},
+    {file = "websockets-12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2d225bb6886591b1746b17c0573e29804619c8f755b5598d875bb4235ea639be"},
+    {file = "websockets-12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:eb809e816916a3b210bed3c82fb88eaf16e8afcf9c115ebb2bacede1797d2547"},
+    {file = "websockets-12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c588f6abc13f78a67044c6b1273a99e1cf31038ad51815b3b016ce699f0d75c2"},
+    {file = "websockets-12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5aa9348186d79a5f232115ed3fa9020eab66d6c3437d72f9d2c8ac0c6858c558"},
+    {file = "websockets-12.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6350b14a40c95ddd53e775dbdbbbc59b124a5c8ecd6fbb09c2e52029f7a9f480"},
+    {file = "websockets-12.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:70ec754cc2a769bcd218ed8d7209055667b30860ffecb8633a834dde27d6307c"},
+    {file = "websockets-12.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6e96f5ed1b83a8ddb07909b45bd94833b0710f738115751cdaa9da1fb0cb66e8"},
+    {file = "websockets-12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4d87be612cbef86f994178d5186add3d94e9f31cc3cb499a0482b866ec477603"},
+    {file = "websockets-12.0-cp310-cp310-win32.whl", hash = "sha256:befe90632d66caaf72e8b2ed4d7f02b348913813c8b0a32fae1cc5fe3730902f"},
+    {file = "websockets-12.0-cp310-cp310-win_amd64.whl", hash = "sha256:363f57ca8bc8576195d0540c648aa58ac18cf85b76ad5202b9f976918f4219cf"},
+    {file = "websockets-12.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5d873c7de42dea355d73f170be0f23788cf3fa9f7bed718fd2830eefedce01b4"},
+    {file = "websockets-12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3f61726cae9f65b872502ff3c1496abc93ffbe31b278455c418492016e2afc8f"},
+    {file = "websockets-12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed2fcf7a07334c77fc8a230755c2209223a7cc44fc27597729b8ef5425aa61a3"},
+    {file = "websockets-12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e332c210b14b57904869ca9f9bf4ca32f5427a03eeb625da9b616c85a3a506c"},
+    {file = "websockets-12.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5693ef74233122f8ebab026817b1b37fe25c411ecfca084b29bc7d6efc548f45"},
+    {file = "websockets-12.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e9e7db18b4539a29cc5ad8c8b252738a30e2b13f033c2d6e9d0549b45841c04"},
+    {file = "websockets-12.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6e2df67b8014767d0f785baa98393725739287684b9f8d8a1001eb2839031447"},
+    {file = "websockets-12.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bea88d71630c5900690fcb03161ab18f8f244805c59e2e0dc4ffadae0a7ee0ca"},
+    {file = "websockets-12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dff6cdf35e31d1315790149fee351f9e52978130cef6c87c4b6c9b3baf78bc53"},
+    {file = "websockets-12.0-cp311-cp311-win32.whl", hash = "sha256:3e3aa8c468af01d70332a382350ee95f6986db479ce7af14d5e81ec52aa2b402"},
+    {file = "websockets-12.0-cp311-cp311-win_amd64.whl", hash = "sha256:25eb766c8ad27da0f79420b2af4b85d29914ba0edf69f547cc4f06ca6f1d403b"},
+    {file = "websockets-12.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0e6e2711d5a8e6e482cacb927a49a3d432345dfe7dea8ace7b5790df5932e4df"},
+    {file = "websockets-12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dbcf72a37f0b3316e993e13ecf32f10c0e1259c28ffd0a85cee26e8549595fbc"},
+    {file = "websockets-12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12743ab88ab2af1d17dd4acb4645677cb7063ef4db93abffbf164218a5d54c6b"},
+    {file = "websockets-12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b645f491f3c48d3f8a00d1fce07445fab7347fec54a3e65f0725d730d5b99cb"},
+    {file = "websockets-12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9893d1aa45a7f8b3bc4510f6ccf8db8c3b62120917af15e3de247f0780294b92"},
+    {file = "websockets-12.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f38a7b376117ef7aff996e737583172bdf535932c9ca021746573bce40165ed"},
+    {file = "websockets-12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:f764ba54e33daf20e167915edc443b6f88956f37fb606449b4a5b10ba42235a5"},
+    {file = "websockets-12.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:1e4b3f8ea6a9cfa8be8484c9221ec0257508e3a1ec43c36acdefb2a9c3b00aa2"},
+    {file = "websockets-12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9fdf06fd06c32205a07e47328ab49c40fc1407cdec801d698a7c41167ea45113"},
+    {file = "websockets-12.0-cp312-cp312-win32.whl", hash = "sha256:baa386875b70cbd81798fa9f71be689c1bf484f65fd6fb08d051a0ee4e79924d"},
+    {file = "websockets-12.0-cp312-cp312-win_amd64.whl", hash = "sha256:ae0a5da8f35a5be197f328d4727dbcfafa53d1824fac3d96cdd3a642fe09394f"},
+    {file = "websockets-12.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5f6ffe2c6598f7f7207eef9a1228b6f5c818f9f4d53ee920aacd35cec8110438"},
+    {file = "websockets-12.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9edf3fc590cc2ec20dc9d7a45108b5bbaf21c0d89f9fd3fd1685e223771dc0b2"},
+    {file = "websockets-12.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8572132c7be52632201a35f5e08348137f658e5ffd21f51f94572ca6c05ea81d"},
+    {file = "websockets-12.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:604428d1b87edbf02b233e2c207d7d528460fa978f9e391bd8aaf9c8311de137"},
+    {file = "websockets-12.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a9d160fd080c6285e202327aba140fc9a0d910b09e423afff4ae5cbbf1c7205"},
+    {file = "websockets-12.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87b4aafed34653e465eb77b7c93ef058516cb5acf3eb21e42f33928616172def"},
+    {file = "websockets-12.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b2ee7288b85959797970114deae81ab41b731f19ebcd3bd499ae9ca0e3f1d2c8"},
+    {file = "websockets-12.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7fa3d25e81bfe6a89718e9791128398a50dec6d57faf23770787ff441d851967"},
+    {file = "websockets-12.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a571f035a47212288e3b3519944f6bf4ac7bc7553243e41eac50dd48552b6df7"},
+    {file = "websockets-12.0-cp38-cp38-win32.whl", hash = "sha256:3c6cc1360c10c17463aadd29dd3af332d4a1adaa8796f6b0e9f9df1fdb0bad62"},
+    {file = "websockets-12.0-cp38-cp38-win_amd64.whl", hash = "sha256:1bf386089178ea69d720f8db6199a0504a406209a0fc23e603b27b300fdd6892"},
+    {file = "websockets-12.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ab3d732ad50a4fbd04a4490ef08acd0517b6ae6b77eb967251f4c263011a990d"},
+    {file = "websockets-12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a1d9697f3337a89691e3bd8dc56dea45a6f6d975f92e7d5f773bc715c15dde28"},
+    {file = "websockets-12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1df2fbd2c8a98d38a66f5238484405b8d1d16f929bb7a33ed73e4801222a6f53"},
+    {file = "websockets-12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23509452b3bc38e3a057382c2e941d5ac2e01e251acce7adc74011d7d8de434c"},
+    {file = "websockets-12.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e5fc14ec6ea568200ea4ef46545073da81900a2b67b3e666f04adf53ad452ec"},
+    {file = "websockets-12.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46e71dbbd12850224243f5d2aeec90f0aaa0f2dde5aeeb8fc8df21e04d99eff9"},
+    {file = "websockets-12.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b81f90dcc6c85a9b7f29873beb56c94c85d6f0dac2ea8b60d995bd18bf3e2aae"},
+    {file = "websockets-12.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a02413bc474feda2849c59ed2dfb2cddb4cd3d2f03a2fedec51d6e959d9b608b"},
+    {file = "websockets-12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bbe6013f9f791944ed31ca08b077e26249309639313fff132bfbf3ba105673b9"},
+    {file = "websockets-12.0-cp39-cp39-win32.whl", hash = "sha256:cbe83a6bbdf207ff0541de01e11904827540aa069293696dd528a6640bd6a5f6"},
+    {file = "websockets-12.0-cp39-cp39-win_amd64.whl", hash = "sha256:fc4e7fa5414512b481a2483775a8e8be7803a35b30ca805afa4998a84f9fd9e8"},
+    {file = "websockets-12.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:248d8e2446e13c1d4326e0a6a4e9629cb13a11195051a73acf414812700badbd"},
+    {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f44069528d45a933997a6fef143030d8ca8042f0dfaad753e2906398290e2870"},
+    {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c4e37d36f0d19f0a4413d3e18c0d03d0c268ada2061868c1e6f5ab1a6d575077"},
+    {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d829f975fc2e527a3ef2f9c8f25e553eb7bc779c6665e8e1d52aa22800bb38b"},
+    {file = "websockets-12.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:2c71bd45a777433dd9113847af751aae36e448bc6b8c361a566cb043eda6ec30"},
+    {file = "websockets-12.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0bee75f400895aef54157b36ed6d3b308fcab62e5260703add87f44cee9c82a6"},
+    {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:423fc1ed29f7512fceb727e2d2aecb952c46aa34895e9ed96071821309951123"},
+    {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27a5e9964ef509016759f2ef3f2c1e13f403725a5e6a1775555994966a66e931"},
+    {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3181df4583c4d3994d31fb235dc681d2aaad744fbdbf94c4802485ececdecf2"},
+    {file = "websockets-12.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:b067cb952ce8bf40115f6c19f478dc71c5e719b7fbaa511359795dfd9d1a6468"},
+    {file = "websockets-12.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:00700340c6c7ab788f176d118775202aadea7602c5cc6be6ae127761c16d6b0b"},
+    {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e469d01137942849cff40517c97a30a93ae79917752b34029f0ec72df6b46399"},
+    {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffefa1374cd508d633646d51a8e9277763a9b78ae71324183693959cf94635a7"},
+    {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba0cab91b3956dfa9f512147860783a1829a8d905ee218a9837c18f683239611"},
+    {file = "websockets-12.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2cb388a5bfb56df4d9a406783b7f9dbefb888c09b71629351cc6b036e9259370"},
+    {file = "websockets-12.0-py3-none-any.whl", hash = "sha256:dc284bbc8d7c78a6c69e0c7325ab46ee5e40bb4d50e494d8131a07ef47500e9e"},
+    {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"},
+]
+
 [[package]]
 name = "yarl"
 version = "1.9.4"
@@ -1160,4 +2195,4 @@ multidict = ">=4.0"
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.11"
-content-hash = "dc6781ae577a76ccefee8f8286035abf35e9e5b2486c2c671b2d4bbb9c99bdc5"
+content-hash = "982c385f8e526d48c67312f0852b1643703473608cfa4ef54f0a1bdae56c346e"
diff --git a/backend/core/pyproject.toml b/backend/core/pyproject.toml
index 8dfd73d3107a..a8c4823be462 100644
--- a/backend/core/pyproject.toml
+++ b/backend/core/pyproject.toml
@@ -12,6 +12,15 @@ pydantic = "^2.7.4"
 langchain-core = "^0.2.10"
 langchain = "^0.2.6"
 
+[tool.poetry.group.api.dependencies]
+aiofiles = "^24.1.0"
+python-dotenv = "^1.0.1"
+fastapi = "^0.111.0"
+sqlmodel = "^0.0.19"
+asyncpg = "^0.29.0"
+uvicorn = "^0.30.1"
+supabase = { version = "^2.5.1", optional = true }
+
 [tool.ruff]
 line-length = 88
 exclude = [".git", "__pycache__", ".mypy_cache", ".pytest_cache"]

From f988a8b5c0f305f0073a593168fa31acf39630cf Mon Sep 17 00:00:00 2001
From: aminediro
Date: Mon, 1 Jul 2024 17:05:44 +0200
Subject: [PATCH 20/20] vecstore modules

---
 .../modules/brain/service/brain_service.py    |  3 +-
 .../get_question_context_from_brain.py        |  2 +-
 .../quivr_core/api/modules/rag/rag_service.py |  2 +-
 .../api/packages/files/loaders/telegram.py    | 65 -------------------
 .../api/packages/files/processors.py          |  2 -
 .../loaders => vectorstore}/__init__.py       |  0
 .../{ => packages}/vectorstore/supabase.py    |  0
 .../quivr_core/api/vectorstore/__init__.py    |  0
 8 files changed, 3 insertions(+), 71 deletions(-)
 delete mode 100644 backend/core/quivr_core/api/packages/files/loaders/telegram.py
 rename backend/core/quivr_core/api/packages/{files/loaders => vectorstore}/__init__.py (100%)
 rename backend/core/quivr_core/api/{ => packages}/vectorstore/supabase.py (100%)
 delete mode 100644 backend/core/quivr_core/api/vectorstore/__init__.py

diff --git a/backend/core/quivr_core/api/modules/brain/service/brain_service.py b/backend/core/quivr_core/api/modules/brain/service/brain_service.py
index 12db36d95f3f..8b5fe10e0486 100644
--- a/backend/core/quivr_core/api/modules/brain/service/brain_service.py
+++ b/backend/core/quivr_core/api/modules/brain/service/brain_service.py
@@ -22,7 +22,7 @@
     IntegrationDescription,
 )
 from quivr_core.api.modules.knowledge.service.knowledge_service import KnowledgeService
-from quivr_core.api.vectorstore.supabase import CustomSupabaseVectorStore
+from quivr_core.api.packages.vectorstore.supabase import CustomSupabaseVectorStore
 
 logger = get_logger(__name__)
 
@@ -30,7 +30,6 @@
 
 
 class BrainService:
-
     def __init__(self):
         self.brain_repository: Brains = Brains()
         self.brain_user_repository = BrainsUsers()
diff --git a/backend/core/quivr_core/api/modules/brain/service/get_question_context_from_brain.py b/backend/core/quivr_core/api/modules/brain/service/get_question_context_from_brain.py
index 717a6655c20d..7b88efe0a084 100644
--- a/backend/core/quivr_core/api/modules/brain/service/get_question_context_from_brain.py
+++ b/backend/core/quivr_core/api/modules/brain/service/get_question_context_from_brain.py
@@ -7,7 +7,7 @@
 from quivr_core.api.modules.upload.service.generate_file_signed_url import (
     generate_file_signed_url,
 )
-from quivr_core.api.vectorstore.supabase import CustomSupabaseVectorStore
+from quivr_core.api.packages.vectorstore.supabase import CustomSupabaseVectorStore
 
 logger = get_logger(__name__)
 
diff --git a/backend/core/quivr_core/api/modules/rag/rag_service.py b/backend/core/quivr_core/api/modules/rag/rag_service.py
index f00a8e6ce5ec..ab0f79cf1c66 100644
--- a/backend/core/quivr_core/api/modules/rag/rag_service.py
+++ b/backend/core/quivr_core/api/modules/rag/rag_service.py
@@ -21,7 +21,7 @@
 from quivr_core.api.modules.prompt.entity.prompt import Prompt
 from quivr_core.api.modules.prompt.service.prompt_service import PromptService
 from quivr_core.api.modules.user.entity.user_identity import UserIdentity
-from quivr_core.api.vectorstore.supabase import CustomSupabaseVectorStore
+from quivr_core.api.packages.vectorstore.supabase import CustomSupabaseVectorStore
 from quivr_core.config import RAGConfig
 from quivr_core.models import ParsedRAGResponse, RAGResponseMetadata
 from quivr_core.quivr_rag import QuivrQARAG
diff --git a/backend/core/quivr_core/api/packages/files/loaders/telegram.py b/backend/core/quivr_core/api/packages/files/loaders/telegram.py
deleted file mode 100644
index e114e46751fe..000000000000
--- a/backend/core/quivr_core/api/packages/files/loaders/telegram.py
+++ /dev/null
@@ -1,65 +0,0 @@
-from __future__ import annotations
-
-import json
-from pathlib import Path
-from typing import List
-
-from langchain.docstore.document import Document
-from langchain_community.document_loaders.base import BaseLoader
-
-
-def concatenate_rows(row: dict) -> str:
-    """Combine message information in a readable format ready to be used."""
-    date = row["date"]
-    sender = row.get(
-        "from", "Unknown"
-    )  # Using .get() to handle cases where 'from' might not be present
-
-    text_content = row.get("text", "")
-
-    # Function to process a single text entity
-    def process_text_entity(entity):
-        if isinstance(entity, str):
-            return entity
-        elif isinstance(entity, dict) and "text" in entity:
-            return entity["text"]
-        return ""
-
-    # Process the text content based on its type
-    if isinstance(text_content, str):
-        text = text_content
-    elif isinstance(text_content, list):
-        text = "".join(process_text_entity(item) for item in text_content)
-    else:
-        text = ""
-
-    # Skip messages with empty text
-    if not text.strip():
-        return ""
-
-    return f"{sender} on {date}: {text}\n\n"
-
-
-class TelegramChatFileLoader(BaseLoader):
-    """Load from `Telegram chat` dump."""
-
-    def __init__(self, path: str):
-        """Initialize with a path."""
-        self.file_path = path
-
-    def load(self) -> List[Document]:
-        """Load documents."""
-        p = Path(self.file_path)
-
-        with open(p, encoding="utf8") as f:
-            d = json.load(f)
-
-        text = "".join(
-            concatenate_rows(message)
-            for message in d["messages"]
-            if message["type"] == "message"
-            and (isinstance(message["text"], str) or isinstance(message["text"], list))
-        )
-        metadata = {"source": str(p)}
-
-        return [Document(page_content=text, metadata=metadata)]
diff --git a/backend/core/quivr_core/api/packages/files/processors.py b/backend/core/quivr_core/api/packages/files/processors.py
index cc7c777dee0a..76e47019f444 100644
--- a/backend/core/quivr_core/api/packages/files/processors.py
+++ b/backend/core/quivr_core/api/packages/files/processors.py
@@ -12,7 +12,6 @@
 from .parsers.odt import process_odt
 from .parsers.pdf import process_pdf
 from .parsers.powerpoint import process_powerpoint
-from .parsers.telegram import process_telegram
 from .parsers.txt import process_txt
 from .parsers.xlsx import process_xlsx
 
@@ -21,7 +20,6 @@
     ".csv": process_csv,
     ".md": process_markdown,
     ".markdown": process_markdown,
-    ".telegram": process_telegram,
     ".m4a": process_audio,
     ".mp3": process_audio,
     ".webm": process_audio,
diff --git a/backend/core/quivr_core/api/packages/files/loaders/__init__.py b/backend/core/quivr_core/api/packages/vectorstore/__init__.py
similarity index 100%
rename from backend/core/quivr_core/api/packages/files/loaders/__init__.py
rename to backend/core/quivr_core/api/packages/vectorstore/__init__.py
diff --git a/backend/core/quivr_core/api/vectorstore/supabase.py b/backend/core/quivr_core/api/packages/vectorstore/supabase.py
similarity index 100%
rename from backend/core/quivr_core/api/vectorstore/supabase.py
rename to backend/core/quivr_core/api/packages/vectorstore/supabase.py
diff --git a/backend/core/quivr_core/api/vectorstore/__init__.py b/backend/core/quivr_core/api/vectorstore/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
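
For reference, the TelegramChatFileLoader deleted above parsed Telegram's JSON chat export, in which "text" may be either a plain string or a list mixing strings with {"text": ...} entity dicts. A minimal sketch of the input shape it accepted (the sample data below is illustrative, not taken from the repository):

import json
import tempfile

# Illustrative Telegram export in the shape concatenate_rows() handled:
# "text" is either a plain string or a list of strings and entity dicts.
export = {
    "messages": [
        {"type": "message", "date": "2024-06-28", "from": "alice", "text": "hello"},
        {
            "type": "message",
            "date": "2024-06-28",
            "from": "bob",
            "text": ["see ", {"type": "link", "text": "this link"}],
        },
    ]
}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(export, f)

# Before this patch, TelegramChatFileLoader(f.name).load() returned a single
# Document whose page_content concatenated lines such as
# "alice on 2024-06-28: hello\n\n".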
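Note also that PATCH 01 declares supabase as optional inside the new api dependency group in backend/core/pyproject.toml, so code paths must tolerate its absence. A hypothetical import guard in the usual style (create_client is supabase-py's documented entry point; the fallback name is ours, not part of this series):

# Hypothetical guard for the optional supabase dependency.
try:
    from supabase import create_client
except ImportError:  # api group installed without supabase
    create_client = None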