This repository has been archived by the owner on Sep 23, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathdocker-compose.yml
94 lines (85 loc) · 4.31 KB
/
docker-compose.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
# Compose stack for the LLM Defender subnet: validator variants, the subnet
# API, and a local vLLM server used for prompt generation.
services:
  # Shared base for every validator variant below; reused via the
  # &common-validator anchor and `<<:` merge keys.
  # NOTE(review): this is declared as a real service rather than an `x-`
  # extension field, so a plain `docker compose up` will start it too (with
  # the image's default command) — confirm that is intended.
  common-validator: &common-validator
    image: ghcr.io/synapsec-ai/llm-defender-validator:v0.9.3
    restart: unless-stopped
    pull_policy: always  # re-pull the pinned tag on every start
    user: llm-defender-user  # run unprivileged inside the container
    ports:
      - "6000:6000"
    volumes:
      # Named volume for subnet state; host ~/.bittensor for wallet material.
      - llm-defender-subnet:/home/llm-defender-user/.llm-defender-subnet
      - ${HOME}/.bittensor:/home/llm-defender-user/.bittensor
  # Subnet API server. Also the base for llm-defender-api-dev via the
  # &llm-defender-api anchor.
  llm-defender-api: &llm-defender-api
    restart: unless-stopped
    pull_policy: always
    image: ghcr.io/synapsec-ai/llm-defender-api:v0.9.3
    # Activate the image's virtualenv, then launch the subnet API entrypoint.
    command: /bin/bash -c "source /llm-defender-subnet/.venv/bin/activate && python3 /llm-defender-subnet/llm_defender/subnet_api/main.py"
    ports:
      - "8080:8080"
    volumes:
      - ${HOME}/.bittensor:/home/llm-defender-user/.bittensor
    environment:
      # All values are substituted from the host environment / .env file.
      # The API reuses the validator's wallet credentials.
      - NETUID=${NETUID}
      - SUBTENSOR_CHAIN_ENDPOINT=${SUBTENSOR_CHAIN_ENDPOINT}
      - WALLET_NAME=${VALIDATOR_WALLET}
      - WALLET_HOTKEY=${VALIDATOR_HOTKEY}
      - API_LOG_LEVEL=${LOG_LEVEL}
      - TOP_AXONS_ONLY=${TOP_AXONS_ONLY}
      - AXONS_TO_QUERY=${AXONS_TO_QUERY}
  # Validator using the published image, run with --debug_mode.
  llm-defender-validator-debug-mode:
    <<: *common-validator
    command: /bin/bash -c "source /llm-defender-subnet/.venv/bin/activate && python3 /llm-defender-subnet/llm_defender/neurons/validator.py --netuid ${NETUID} --subtensor.chain_endpoint ${SUBTENSOR_CHAIN_ENDPOINT} --wallet.name ${VALIDATOR_WALLET} --wallet.hotkey ${VALIDATOR_HOTKEY} --log_level ${LOG_LEVEL} --debug_mode"
llm-defender-validator-remote-vllm:
<<: *common-validator
command: /bin/bash -c "source /llm-defender-subnet/.venv/bin/activate && python3 /llm-defender-subnet/llm_defender/neurons/validator.py --netuid ${NETUID} --subtensor.chain_endpoint ${SUBTENSOR_CHAIN_ENDPOINT} --wallet.name ${VALIDATOR_WALLET} --wallet.hotkey ${VALIDATOR_HOTKEY} --log_level ${LOG_LEVEL} --vllm_base_url ${VLLM_BASE_URL} --vllm_api_key ${VLLM_API_KEY}"
  # Standard validator using the published image and the local vLLM server
  # below for prompt generation.
  llm-defender-validator:
    <<: *common-validator
    depends_on:
      # Start ordering only — compose does not wait for vLLM to be ready.
      - prompt-generation-api
    command: /bin/bash -c "source /llm-defender-subnet/.venv/bin/activate && python3 /llm-defender-subnet/llm_defender/neurons/validator.py --netuid ${NETUID} --subtensor.chain_endpoint ${SUBTENSOR_CHAIN_ENDPOINT} --wallet.name ${VALIDATOR_WALLET} --wallet.hotkey ${VALIDATOR_HOTKEY} --log_level ${LOG_LEVEL}"
  # Debug-mode validator built locally instead of pulled (dev variant).
  llm-defender-validator-debug-mode-dev:
    <<: *common-validator
    build:
      context: .
      dockerfile: validator.Dockerfile
    command: /bin/bash -c "source /llm-defender-subnet/.venv/bin/activate && python3 /llm-defender-subnet/llm_defender/neurons/validator.py --netuid ${NETUID} --subtensor.chain_endpoint ${SUBTENSOR_CHAIN_ENDPOINT} --wallet.name ${VALIDATOR_WALLET} --wallet.hotkey ${VALIDATOR_HOTKEY} --log_level ${LOG_LEVEL} --debug_mode"
  # Remote-vLLM validator built locally (dev variant). Note this variant
  # forwards --vllm_model_name in addition to base URL and API key.
  llm-defender-validator-remote-vllm-dev:
    <<: *common-validator
    build:
      context: .
      dockerfile: validator.Dockerfile
    command: /bin/bash -c "source /llm-defender-subnet/.venv/bin/activate && python3 /llm-defender-subnet/llm_defender/neurons/validator.py --netuid ${NETUID} --subtensor.chain_endpoint ${SUBTENSOR_CHAIN_ENDPOINT} --wallet.name ${VALIDATOR_WALLET} --wallet.hotkey ${VALIDATOR_HOTKEY} --log_level ${LOG_LEVEL} --vllm_base_url ${VLLM_BASE_URL} --vllm_api_key ${VLLM_API_KEY} --vllm_model_name ${VLLM_MODEL_NAME}"
  # Standard validator built locally (dev variant); uses the local vLLM
  # prompt-generation server below.
  llm-defender-validator-dev:
    <<: *common-validator
    build:
      context: .
      dockerfile: validator.Dockerfile
    depends_on:
      # Start ordering only — compose does not wait for vLLM to be ready.
      - prompt-generation-api
    command: /bin/bash -c "source /llm-defender-subnet/.venv/bin/activate && python3 /llm-defender-subnet/llm_defender/neurons/validator.py --netuid ${NETUID} --subtensor.chain_endpoint ${SUBTENSOR_CHAIN_ENDPOINT} --wallet.name ${VALIDATOR_WALLET} --wallet.hotkey ${VALIDATOR_HOTKEY} --log_level ${LOG_LEVEL}"
prompt-generation-api:
restart: unless-stopped
image: vllm/vllm-openai:v0.5.0
ports:
- "8000:8000"
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities: [gpu]
ipc: host
command: [
"--model", "${VLLM_MODEL_NAME}",
"--tensor-parallel-size", "${TENSOR_PARALLEL_SIZE}"
]
volumes:
- ${HOME}/.cache/huggingface:/root/.cache/huggingface
  # Subnet API built locally from api.Dockerfile; everything else (command,
  # ports, volumes, environment) is inherited from llm-defender-api.
  llm-defender-api-dev:
    <<: *llm-defender-api
    build:
      context: .
      dockerfile: api.Dockerfile
# Named volume holding validator state shared across the validator variants.
volumes:
  llm-defender-subnet: