-
-
Notifications
You must be signed in to change notification settings - Fork 82
/
docker-compose.yml
136 lines (122 loc) · 3 KB
/
docker-compose.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
# Docker Compose stack for an IoT data pipeline:
# Kafka (+ ZooKeeper) for ingest, Cassandra for storage,
# Spark master/worker for processing, and an HDFS namenode/datanode pair.
version: "3.3"

networks:
  net:
    driver: bridge
    ipam:
      config:
        - subnet: "172.22.0.0/24"

services:
  # ZooKeeper — coordination service required by this Kafka version.
  zookeeper:
    image: confluentinc/cp-zookeeper:5.1.0
    hostname: zookeeper
    container_name: zookeeper-iot
    ports:
      - "2181:2181"
    networks:
      - net
    environment:
      ZOOKEEPER_CLIENT_PORT: "2181"

  # Kafka broker — reachable as kafka:9092 inside the compose network
  # and as localhost:29092 from the host (PLAINTEXT_HOST listener).
  kafka:
    image: confluentinc/cp-kafka:5.1.0
    hostname: kafka
    container_name: kafka-iot
    depends_on:
      - zookeeper
    ports:
      - "9092:9092"
      - "29092:29092"
    environment:
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092"
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT"
      KAFKA_INTER_BROKER_LISTENER_NAME: "PLAINTEXT"
      # Single-broker setup, so the internal offsets topic cannot replicate.
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1"
      KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.request.logger=WARN"
      KAFKA_LOG4J_ROOT_LOGLEVEL: "WARN"
    volumes:
      # NOTE(review): mounting the Docker socket gives this container
      # root-equivalent control of the host's Docker daemon — confirm it
      # is actually needed by this image.
      - /var/run/docker.sock:/var/run/docker.sock
    networks:
      - net
    restart: always

  # Cassandra — pinned to a static IP on the bridge network; schema file
  # is mounted so it can be applied manually (cqlsh -f /schema.cql).
  cassandra:
    image: bitnami/cassandra:latest
    hostname: cassandra
    container_name: cassandra-iot
    networks:
      net:
        ipv4_address: 172.22.0.6
    ports:
      - "9042:9042"
    environment:
      # Small heap for local development.
      MAX_HEAP_SIZE: "256M"
      HEAP_NEWSIZE: "128M"
    volumes:
      - ./data/schema.cql:/schema.cql

  # Spark master — web UI on 8080, cluster port 7077, driver UIs on 4040/4041.
  spark-master:
    image: bde2020/spark-master:3.0.0-hadoop3.2-java11
    container_name: spark-master
    hostname: spark-master
    healthcheck:
      # NOTE(review): no `test` command is defined here — these settings
      # only take effect if the image ships its own HEALTHCHECK; verify.
      interval: 5s
      retries: 100
    ports:
      - "8080:8080"
      - "7077:7077"
      - "4040:4040"
      - "4041:4041"
    environment:
      INIT_DAEMON_STEP: "false"
      # NOTE(review): hard-coded host LAN address — must match the machine
      # the driver runs on; consider making this configurable via .env.
      SPARK_DRIVER_HOST: "192.168.1.5"
    volumes:
      - ./iot-spark-processor/target:/opt/spark-data
    networks:
      - net

  # Spark worker — registers with the master over the compose network.
  spark-worker-1:
    image: bde2020/spark-worker:3.0.0-hadoop3.2-java11
    container_name: spark-worker-1
    hostname: spark-worker-1
    depends_on:
      - spark-master
    ports:
      - "8081:8081"
    environment:
      SPARK_MASTER: "spark://spark-master:7077"
    volumes:
      - ./data/spark/:/opt/spark-data
    networks:
      - net

  # HDFS namenode — metadata persisted to ./data/namenode on the host.
  namenode:
    image: bde2020/hadoop-namenode:2.0.0-hadoop3.1.3-java8
    container_name: namenode
    hostname: namenode
    volumes:
      - ./data/namenode:/hadoop/dfs/name
    environment:
      CLUSTER_NAME: "test"
      CORE_CONF_fs_defaultFS: "hdfs://namenode:8020"
    healthcheck:
      # NOTE(review): no `test` command defined — see spark-master note.
      interval: 5s
      retries: 100
    networks:
      - net
    ports:
      - "9870:9870"
      - "8020:8020"

  # HDFS datanode — block storage persisted to ./data/datanode on the host.
  datanode:
    image: bde2020/hadoop-datanode:2.0.0-hadoop3.1.3-java8
    container_name: datanode
    hostname: datanode
    depends_on:
      - namenode
    volumes:
      - ./data/datanode:/hadoop/dfs/data
    environment:
      CORE_CONF_fs_defaultFS: "hdfs://namenode:8020"
    healthcheck:
      # NOTE(review): no `test` command defined — see spark-master note.
      interval: 5s
      retries: 100
    networks:
      - net
    ports:
      - "50075:50075"
      - "50010:50010"