Commit

feat: n8n integration, broken bolt integration

av committed Oct 27, 2024
1 parent 5b3f107 commit 795dcdf
Showing 20 changed files with 300 additions and 27 deletions.
2 changes: 1 addition & 1 deletion .scripts/seed.ts
@@ -2,7 +2,7 @@ import * as toml from 'jsr:@std/toml';
import * as path from 'jsr:@std/path';
import * as collections from "jsr:@std/collections/deep-merge";

-const VERSION = "0.2.12";
+const VERSION = "0.2.13";

type ValueSeed = {
// Path relative to the project root
2 changes: 1 addition & 1 deletion app/package.json
@@ -1,7 +1,7 @@
{
"name": "@avcodes/harbor-app",
"private": true,
"version": "0.2.12",
"version": "0.2.13",
"type": "module",
"scripts": {
"dev": "vite",
2 changes: 1 addition & 1 deletion app/src-tauri/Cargo.toml
@@ -1,7 +1,7 @@

[package]
name = "harbor-app"
-version = "0.2.12"
+version = "0.2.13"
description = "A companion app for Harbor LLM toolkit"
authors = ["av"]
edition = "2021"
2 changes: 1 addition & 1 deletion app/src-tauri/tauri.conf.json
@@ -1,7 +1,7 @@
{
"$schema": "https://schema.tauri.app/config/2.0.0-rc",
"productName": "Harbor",
"version": "0.2.12",
"version": "0.2.13",
"identifier": "com.harbor.app",
"build": {
"beforeDevCommand": "bun run dev",
6 changes: 6 additions & 0 deletions app/src/serviceMetadata.tsx
@@ -191,4 +191,10 @@ export const serviceMetadata: Record<string, Partial<HarborService>> = {
repopack: {
tags: [HST.satellite, HST.cli],
},
+n8n: {
+tags: [HST.satellite],
+},
+bolt: {
+tags: [HST.satellite, HST.partial],
+}
};
42 changes: 42 additions & 0 deletions bolt/.env.local
@@ -0,0 +1,42 @@
# Get your GROQ API Key here -
# https://console.groq.com/keys
# You only need this environment variable set if you want to use Groq models
GROQ_API_KEY=

# Get your OpenAI API Key by following these instructions -
# https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
# You only need this environment variable set if you want to use GPT models
OPENAI_API_KEY=

# Get your Anthropic API Key in your account settings -
# https://console.anthropic.com/settings/keys
# You only need this environment variable set if you want to use Claude models
ANTHROPIC_API_KEY=

# Get your OpenRouter API Key in your account settings -
# https://openrouter.ai/settings/keys
# You only need this environment variable set if you want to use OpenRouter models
OPEN_ROUTER_API_KEY=

# Get your Google Generative AI API Key by following these instructions -
# https://console.cloud.google.com/apis/credentials
# You only need this environment variable set if you want to use Google Generative AI models
GOOGLE_GENERATIVE_AI_API_KEY=

# You only need this environment variable set if you want to use Ollama models
# EXAMPLE http://localhost:11434
OLLAMA_API_BASE_URL=http://localhost:33821

# You only need this environment variable set if you want to use OpenAI Like models
OPENAI_LIKE_API_BASE_URL=http://localhost:33821/v1

# Get your OpenAI Like API Key
OPENAI_LIKE_API_KEY=sk-ollama

# Get your Mistral API Key by following these instructions -
# https://console.mistral.ai/api-keys/
# You only need this environment variable set if you want to use Mistral models
MISTRAL_API_KEY=

# Include this environment variable if you want more logging for debugging locally
VITE_LOG_LEVEL=debug
6 changes: 6 additions & 0 deletions bolt/Dockerfile
@@ -0,0 +1,6 @@
FROM ghcr.io/coleam00/bolt.new-any-llm:latest

# Only works at build time
COPY .env.local /app/.env.local
RUN pnpm build
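Since the upstream image only picks up `.env.local` at build time (per the comment above), this Dockerfile bakes the file in and rebuilds. A minimal sketch of producing such an image; the tag name is an assumption for illustration, not something this commit defines:

```bash
# Build from the bolt/ context so .env.local is copied in before pnpm build.
docker build -t harbor-bolt ./bolt
```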

9 changes: 9 additions & 0 deletions bolt/model.ts
@@ -0,0 +1,9 @@
import { createOpenAI } from '@ai-sdk/openai';

export function getModel() {
return createOpenAI({
name: 'Ollama',
apiKey: 'sk-ollama',
baseURL: 'http://localhost:33821',
})
}
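This override points bolt's provider at Harbor's OpenAI-compatible Ollama endpoint. A hedged smoke test of that endpoint; the `/v1` path matches `OPENAI_LIKE_API_BASE_URL` in bolt/.env.local above, `sk-ollama` is the placeholder key from the same file, and the model name is only an example:

```bash
curl -s http://localhost:33821/v1/chat/completions \
  -H "Authorization: Bearer sk-ollama" \
  -H "Content-Type: application/json" \
  -d '{"model": "llama3.1:8b", "messages": [{"role": "user", "content": "Hello"}]}'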
3 changes: 3 additions & 0 deletions bolt/override.env
@@ -0,0 +1,3 @@
# Can contain additional environment variables
# specific to the bolt service. See for example:
# https://github.com/coleam00/bolt.new-any-llm/blob/main/.env.example
18 changes: 18 additions & 0 deletions compose.bolt.yml
@@ -0,0 +1,18 @@
services:
bolt:
container_name: ${HARBOR_CONTAINER_PREFIX}.bolt
image: ${HARBOR_BOLT_IMAGE}:${HARBOR_BOLT_VERSION}
ports:
- ${HARBOR_BOLT_HOST_PORT}:3000
environment:
- NODE_ENV=production
env_file:
- ./.env
- ./bolt/override.env
# this repo can't read actual env variables,
# so a .env.local file has to be supplied, and it gets
# bundled into the FE and the Cloudflare workers
volumes:
- ./bolt/.env.local:/app/.env.local
# - ./bolt/model.ts:/app/app/lib/.server/llm/model.ts
command: bash -c "pnpm run build && pnpm run start"
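With the compose file in place, the service should start like any other Harbor service. A sketch, assuming the standard Harbor CLI verbs apply to the new service name:

```bash
harbor up bolt    # start the service (rebuilds at container start, per the command above)
harbor url bolt   # print the URL bound to HARBOR_BOLT_HOST_PORT
```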
51 changes: 51 additions & 0 deletions compose.n8n.yml
@@ -0,0 +1,51 @@
services:
n8n:
image: ${HARBOR_N8N_IMAGE}:${HARBOR_N8N_VERSION}
container_name: ${HARBOR_CONTAINER_PREFIX}.n8n
user: root
ports:
- ${HARBOR_N8N_HOST_PORT}:5678
volumes:
- ${HARBOR_N8N_WORKSPACE}/storage:/home/node/.n8n
- ${HARBOR_N8N_WORKSPACE}/backup:/backup
- ${HARBOR_N8N_WORKSPACE}/shared:/data/shared
depends_on:
n8n-db:
condition: service_healthy
env_file:
- ./.env
- ./n8n/override.env
environment:
- DB_TYPE=postgresdb
- DB_POSTGRESDB_HOST=n8n-db
- DB_POSTGRESDB_USER=${HARBOR_N8N_PG_USER}
- DB_POSTGRESDB_PASSWORD=${HARBOR_N8N_PG_PASSWORD}
- DB_POSTGRESDB_DATABASE=${HARBOR_N8N_PG_DB}
- N8N_DIAGNOSTICS_ENABLED=false
- N8N_PERSONALIZATION_ENABLED=false
- N8N_ENCRYPTION_KEY=${HARBOR_N8N_ENCRYPTION_KEY}
- N8N_USER_MANAGEMENT_JWT_SECRET=${HARBOR_N8N_JWT_SECRET}
networks:
- harbor-network

n8n-db:
image: ${HARBOR_N8N_PG_IMAGE}:${HARBOR_N8N_PG_VERSION}
container_name: ${HARBOR_CONTAINER_PREFIX}.n8n-db
networks:
- harbor-network
ports:
- ${HARBOR_N8N_PG_HOST_PORT}:5432
env_file:
- ./.env
- ${HARBOR_N8N_WORKSPACE}/override.env
environment:
- POSTGRES_USER=${HARBOR_N8N_PG_USER}
- POSTGRES_PASSWORD=${HARBOR_N8N_PG_PASSWORD}
- POSTGRES_DB=${HARBOR_N8N_PG_DB}
volumes:
- ${HARBOR_N8N_WORKSPACE}/db:/var/lib/postgresql/data
healthcheck:
test: ['CMD-SHELL', 'pg_isready -h localhost -U ${HARBOR_N8N_PG_USER} -d ${HARBOR_N8N_PG_DB}']
interval: 5s
timeout: 5s
retries: 10
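The `pg_isready` healthcheck gates n8n's startup through `depends_on: condition: service_healthy`, so the app only boots once Postgres accepts connections. A usage sketch, under the same assumption about the CLI verbs:

```bash
harbor up n8n    # also starts n8n-db and waits for the healthcheck to pass
harbor url n8n   # the UI is exposed on HARBOR_N8N_HOST_PORT (5678 in-container)
```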
4 changes: 4 additions & 0 deletions compose.x.bolt.ollama.yml
@@ -0,0 +1,4 @@
services:
bolt:
environment:
- OLLAMA_API_BASE_URL=http://localhost:${HARBOR_OLLAMA_HOST_PORT}
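This follows Harbor's `compose.x.<a>.<b>.yml` cross-file pattern, merged only when both services run. Note that since bolt bundles its config into the frontend (per the comment in compose.bolt.yml), a `localhost` URL resolves on the viewer's machine rather than inside the container, which may be part of why the commit message calls the bolt integration broken. A sketch of the merge with an illustrative file list; harbor.sh assembles the real one:

```bash
docker compose \
  -f compose.yml \
  -f compose.bolt.yml \
  -f compose.ollama.yml \
  -f compose.x.bolt.ollama.yml \
  up -d
```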
23 changes: 2 additions & 21 deletions harbor.sh
@@ -1754,26 +1754,7 @@ docker_fsacl() {
}

fix_fs_acl() {
-docker_fsacl ./ollama
-docker_fsacl ./langfuse
-docker_fsacl ./open-webui
-docker_fsacl ./tts
-docker_fsacl ./librechat
-docker_fsacl ./searxng
-docker_fsacl ./tabbyapi
-docker_fsacl ./litellm
-docker_fsacl ./plandex
-docker_fsacl ./dify
-docker_fsacl ./textgrad
-docker_fsacl ./aider
-docker_fsacl ./chatui
-docker_fsacl ./comfyui
-docker_fsacl ./bionicgpt
-docker_fsacl ./omnichain
-docker_fsacl ./bench
-docker_fsacl ./jupyter
-docker_fsacl ./ktransformers
-docker_fsacl ./anythingllm
+docker_fsacl .

docker_fsacl $(eval echo "$(env_manager get hf.cache)")
docker_fsacl $(eval echo "$(env_manager get vllm.cache)")
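The refactor collapses twenty per-service ACL calls into a single call on the repo root. `docker_fsacl` itself is defined just above this hunk; a rough sketch of what such a helper typically does, with the UID list as an assumption rather than harbor.sh's actual values:

```bash
docker_fsacl() {
  # Grant common container UIDs recursive rwx plus matching default ACLs,
  # so bind-mounted workspaces stay writable from inside containers.
  # The UIDs here are illustrative only.
  sudo setfacl -R -m u:1000:rwx -m d:u:1000:rwx -m u:1001:rwx -m d:u:1001:rwx "$1"
}
```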
@@ -3526,7 +3507,7 @@ run_repopack_command() {
# ========================================================================

# Globals
-version="0.2.12"
+version="0.2.13"
harbor_repo_url="https://github.com/av/harbor.git"
harbor_release_url="https://api.github.com/repos/av/harbor/releases/latest"
delimiter="|"
3 changes: 3 additions & 0 deletions n8n/.gitignore
@@ -0,0 +1,3 @@
db/
shared/
storage/
8 changes: 8 additions & 0 deletions n8n/backup/credentials/9LdDQI9lblNjIGIZ.json
@@ -0,0 +1,8 @@
{
"createdAt": "2024-10-27T12:40:43.596Z",
"updatedAt": "2024-10-27T12:40:43.595Z",
"id": "9LdDQI9lblNjIGIZ",
"name": "Ollama account",
"data": "U2FsdGVkX1+gq5jFqkLIZ1MN+mZfwTgjc3qA45RGENUK3ootyrLTvqL4cFFG1Uy80jokgSPborQRzYfywShQfA==",
"type": "ollamaApi"
}
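The `data` blob is not plaintext: n8n encrypts stored credentials with the instance encryption key (`HARBOR_N8N_ENCRYPTION_KEY` in compose.n8n.yml), and the `U2FsdGVkX1` prefix is base64 for `Salted__`, the OpenSSL-compatible container that crypto-js emits. If that assumption about n8n's crypto-js defaults holds, a blob like this should decrypt along these lines; illustrative only:

```bash
# Assumes AES-256-CBC with an MD5-based key derivation (crypto-js defaults),
# which corresponds to openssl's legacy -md md5 mode.
echo "U2FsdGVkX1+gq5jFqkLIZ1MN+mZfwTgjc3qA45RGENUK3ootyrLTvqL4cFFG1Uy80jokgSPborQRzYfywShQfA==" |
  openssl enc -d -aes-256-cbc -a -md md5 -pass pass:"$HARBOR_N8N_ENCRYPTION_KEY"
```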
121 changes: 121 additions & 0 deletions n8n/backup/workflows/6K7zSSBeRa0z1hi6.json
@@ -0,0 +1,121 @@
{
"createdAt": "2024-10-27T12:42:27.372Z",
"updatedAt": "2024-10-27T12:42:27.372Z",
"id": "6K7zSSBeRa0z1hi6",
"name": "Chat with local LLMs using n8n and Ollama",
"active": false,
"nodes": [
{
"parameters": {
"options": {}
},
"id": "475385fa-28f3-45c4-bd1a-10dde79f74f2",
"name": "When chat message received",
"type": "@n8n/n8n-nodes-langchain.chatTrigger",
"position": [
920,
620
],
"webhookId": "ebdeba3f-6b4f-49f3-ba0a-8253dd226161",
"typeVersion": 1.1
},
{
"parameters": {
"model": "llama3.1:8b",
"options": {}
},
"id": "61133dc6-dcd9-44ff-85f2-5d8cc2ce813e",
"name": "Ollama Chat Model",
"type": "@n8n/n8n-nodes-langchain.lmChatOllama",
"position": [
1120,
840
],
"typeVersion": 1,
"credentials": {
"ollamaApi": {
"id": "9LdDQI9lblNjIGIZ",
"name": "Ollama account"
}
}
},
{
"parameters": {
"content": "## Chat with local LLMs using n8n and Ollama\nThis n8n workflow allows you to seamlessly interact with your self-hosted Large Language Models (LLMs) through a user-friendly chat interface. By connecting to Ollama, a powerful tool for managing local LLMs, you can send prompts and receive AI-generated responses directly within n8n.\n\n### How it works\n1. When chat message received: Captures the user's input from the chat interface.\n2. Chat LLM Chain: Sends the input to the Ollama server and receives the AI-generated response.\n3. Delivers the LLM's response back to the chat interface.\n\n### Set up steps\n* Make sure Ollama is installed and running on your machine before executing this workflow.\n* Edit the Ollama address if different from the default.\n",
"height": 473,
"width": 485
},
"id": "3e89571f-7c87-44c6-8cfd-4903d5e1cdc5",
"name": "Sticky Note",
"type": "n8n-nodes-base.stickyNote",
"position": [
380,
240
],
"typeVersion": 1
},
{
"parameters": {
"content": "## Ollama setup\n* Connect to your local Ollama, usually on http://localhost:11434\n* If running in Docker, make sure that the n8n container has access to the host's network in order to connect to Ollama. You can do this by passing `--net=host` option when starting the n8n Docker container",
"height": 258,
"width": 368,
"color": 6
},
"id": "9345cadf-a72e-4d3d-b9f0-d670744065fe",
"name": "Sticky Note1",
"type": "n8n-nodes-base.stickyNote",
"position": [
1300,
840
],
"typeVersion": 1
},
{
"parameters": {},
"id": "eeffdd4e-6795-4ebc-84f7-87b5ac4167d9",
"name": "Chat LLM Chain",
"type": "@n8n/n8n-nodes-langchain.chainLlm",
"position": [
1140,
620
],
"typeVersion": 1.4
}
],
"connections": {
"Ollama Chat Model": {
"ai_languageModel": [
[
{
"node": "Chat LLM Chain",
"type": "ai_languageModel",
"index": 0
}
]
]
},
"When chat message received": {
"main": [
[
{
"node": "Chat LLM Chain",
"type": "main",
"index": 0
}
]
]
}
},
"settings": {
"executionOrder": "v1"
},
"staticData": null,
"meta": {
"templateId": "2384",
"templateCredsSetupCompleted": true
},
"pinData": {},
"versionId": "9017d43d-329d-43bc-9046-70057a8da66c",
"triggerCount": 0,
"tags": []
}
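The workflow's sticky note suggests `--net=host` for a standalone n8n, but under Harbor the container joins `harbor-network` instead, so the "Ollama account" credential would presumably point at the service name rather than `localhost`. A hedged connectivity check; the container name assumes the default `harbor` prefix:

```bash
docker exec harbor.n8n curl -s http://ollama:11434/api/version
```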
2 changes: 2 additions & 0 deletions n8n/override.env
@@ -0,0 +1,2 @@
# This file can be used for additional environment variables
# specific to the n8n service
2 changes: 1 addition & 1 deletion package.json
@@ -1,6 +1,6 @@
{
"name": "@avcodes/harbor",
"version": "0.2.12",
"version": "0.2.13",
"description": "Effortlessly run LLM backends, APIs, frontends, and services with one command.",
"private": false,
"author": "av <av@av.codes> (https://av.codes)",