run_without_docker.sh
#!/bin/bash
# Create .env file and add environment variables
cat <<EOF > .env
REACT_APP_DEEPGRAM_API_KEY=your_deepgram_api_key
OPENAI_API_KEY2=your_openai_api_key
EOF
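# NOTE: replace the placeholder values above with your real Deepgram and OpenAI keys before running.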
cp .env client/.env
cp .env app/.env
# Create the llamafile folder
mkdir -p llamafile
# Download the Mistral 7B Instruct llamafile
wget "https://huggingface.co/Mozilla/Mistral-7B-Instruct-v0.2-llamafile/resolve/main/mistral-7b-instruct-v0.2.Q4_0.llamafile?download=true" -O llamafile/mistral-7b-instruct-v0.2.Q4_0.llamafile
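# Note: the model file is several gigabytes; re-running the script downloads it again
# unless you add a guard such as checking whether the file already exists.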
# Make the llamafile executable and start its server in the background
chmod +x llamafile/mistral-7b-instruct-v0.2.Q4_0.llamafile
./llamafile/mistral-7b-instruct-v0.2.Q4_0.llamafile --server --host 0.0.0.0 --port 8080 &
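# Optional sketch (assumption): wait until the llamafile server responds on port 8080
# before continuing, so the backend is not started while the model is still loading.
until curl -s -o /dev/null http://localhost:8080; do
  sleep 2
done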
# Install frontend dependencies
cd client
npm install
# Install backend dependencies
cd ../app
pip install -r requirements.txt
# Start backend
uvicorn main:app --reload &
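# Note: uvicorn serves on http://localhost:8000 by default; the React dev server is
# assumed to proxy API requests to it (check the client configuration if requests fail).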
# Start frontend (runs in the foreground; press Ctrl+C to stop it)
cd ../client
echo "Open your browser and navigate to http://localhost:3000 to interact with the application."
npm start