-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathdocker-compose.yml
140 lines (132 loc) · 3.34 KB
/
docker-compose.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
# Dedicated bridge network with a fixed subnet so services (e.g. cassandra)
# can be pinned to static IPv4 addresses.
networks:
  netw:
    driver: bridge
    ipam:
      config:
        - subnet: 172.23.0.0/24
services:
  # ZooKeeper — coordination service for the Kafka broker.
  zookeeper:
    image: confluentinc/cp-zookeeper:5.1.0
    hostname: zookeeper
    container_name: zookeeper-iot
    ports:
      # Fixed: was "2182:2181", but ZOOKEEPER_CLIENT_PORT below makes the
      # server listen on 2182 INSIDE the container, so container port 2181
      # had nothing bound to it. Quoted to avoid the YAML 1.1 sexagesimal
      # (digits:digits) parsing trap.
      - "2182:2182"
    networks:
      - netw
    environment:
      # Quoted so the value is passed as a string, not a YAML integer.
      # Must match the port used in KAFKA_ZOOKEEPER_CONNECT on the kafka service.
      ZOOKEEPER_CLIENT_PORT: "2182"
kafka:
image: confluentinc/cp-kafka:5.1.0
ports:
- 9092:9092
- 29092:29092
depends_on:
- zookeeper
environment:
KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2182"
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,PLAINTEXT_HOST://localhost:29092
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.request.logger=WARN"
KAFKA_LOG4J_ROOT_LOGLEVEL: WARN
volumes:
- /var/run/docker.sock:/var/run/docker.sock
hostname: kafka
container_name: kafka-iot
networks:
- netw
restart: always
cassandra:
image: 'bitnami/cassandra:latest'
hostname: cassandra
networks:
netw:
ipv4_address: 172.23.0.6
ports:
- "9042:9042"
environment:
- "MAX_HEAP_SIZE=256M"
- "HEAP_NEWSIZE=128M"
container_name: cassandra-iot
volumes:
- C:/Users/samso/Documents/Programming/BigData Speed Processing With Spark Streaming/Manip2_Speed Layer Spark Streaming, kafka et cassandra/data/schema.cql:/schema.cql
spark-master:
image: bde2020/spark-master:3.0.0-hadoop3.2-java11
container_name: spark-master
hostname: spark-master
healthcheck:
interval: 5s
retries: 100
ports:
- "8080:8080"
- "7077:7077"
- "4040:4040"
- "4041:4041"
environment:
- INIT_DAEMON_STEP=false
- SPARK_DRIVER_HOST=192.168.1.5
volumes:
- ./spark-processor/target:/opt/spark-data
networks:
- netw
spark-worker-1:
image: bde2020/spark-worker:3.0.0-hadoop3.2-java11
container_name: spark-worker-1
hostname: spark-worker-1
depends_on:
- spark-master
ports:
- "8081:8081"
environment:
- "SPARK_MASTER=spark://spark-master:7077"
volumes:
- ./data/spark/:/opt/spark-data
networks:
- netw
namenode:
image: bde2020/hadoop-namenode:2.0.0-hadoop3.1.3-java8
container_name: namenode
hostname: namenode
volumes:
- ./data/namenode:/hadoop/dfs/name
environment:
- CLUSTER_NAME=test
- CORE_CONF_fs_defaultFS=hdfs://namenode:8020
healthcheck:
interval: 5s
retries: 100
networks:
- netw
ports:
- 9870:9870
- 8020:8020
datanode:
image: bde2020/hadoop-datanode:2.0.0-hadoop3.1.3-java8
container_name: datanode
hostname: datanode
volumes:
- ./data/datanode:/hadoop/dfs/data
environment:
- CORE_CONF_fs_defaultFS=hdfs://namenode:8020
depends_on:
- namenode
healthcheck:
interval: 5s
retries: 100
networks:
- netw
ports:
- 50075:50075
- 50010:50010
dashboard:
image: openjdk:8-jre-slim
container_name: dashboard
hostname: dashboard
ports:
- "3000:3000"
volumes:
- ./Serving-Layer/target/dashboard-1.0.0.jar:/app.jar
command: ["java", "-jar", "/app.jar"]
networks:
- netw