Kafka version update #59

Merged · 7 commits · Feb 22, 2024
2 changes: 1 addition & 1 deletion .devcontainer/Dockerfile
@@ -45,7 +45,7 @@ RUN apt-get -y install snmp

#-------------------Install Kafka----------------------------------
RUN mkdir ~/Downloads
RUN curl "https://archive.apache.org/dist/kafka/3.6.0/kafka_2.12-3.6.0.tgz" -o ~/Downloads/kafka.tgz
RUN curl "https://archive.apache.org/dist/kafka/3.6.1/kafka_2.12-3.6.1.tgz" -o ~/Downloads/kafka.tgz
RUN mkdir ~/kafka \
&& cd ~/kafka \
&& tar -xvzf ~/Downloads/kafka.tgz --strip 1
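The only change here is the 3.6.0 to 3.6.1 bump in the download URL. A minimal hardening sketch, not part of this PR: pin the expected digest and fail the build if the archive does not match. The placeholder checksum below is hypothetical and would need the real SHA-512 from archive.apache.org.

```
# Sketch only: verify the pinned Kafka 3.6.1 tarball before extracting it.
# <EXPECTED_SHA512> is a placeholder, not the real digest; assumes the build
# user is root, so ~/Downloads resolves to /root/Downloads.
RUN curl "https://archive.apache.org/dist/kafka/3.6.1/kafka_2.12-3.6.1.tgz" -o ~/Downloads/kafka.tgz \
    && echo "<EXPECTED_SHA512>  /root/Downloads/kafka.tgz" | sha512sum -c -
```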
12 changes: 0 additions & 12 deletions .devcontainer/kafka
@@ -8,32 +8,20 @@ export LOG_DIR=/var/log/kafka
case "$1" in
start)
# Start daemon.
echo "Starting Zookeeper";
$DAEMON_PATH/bin/zookeeper-server-start.sh -daemon $DAEMON_PATH/config/zookeeper.properties
echo "Starting Kafka";
$DAEMON_PATH/bin/kafka-server-start.sh -daemon $DAEMON_PATH/config/server.properties
;;
stop)
# Stop daemons.
echo "Shutting down Kafka";
$DAEMON_PATH/bin/kafka-server-stop.sh
sleep 2
echo "Shutting down Zookeeper";
$DAEMON_PATH/bin/zookeeper-server-stop.sh
;;
restart)
$0 stop
sleep 2
$0 start
;;
status)
pid=`ps ax | grep -i 'org.apache.zookeeper.server' | grep -v grep | awk '{print $1}'`
if [ -n "$pid" ]
then
echo "Zookeeper is Running as PID: $pid"
else
echo "Zookeeper is not Running"
fi
pid=`ps ax | grep -i 'kafka.Kafka' | grep -v grep | awk '{print $1}'`
if [ -n "$pid" ]
then
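With the ZooKeeper start/stop/status branches gone, the service script only has to manage the broker itself. A sketch of a broker-only liveness check, not part of this diff; `kafka-broker-api-versions.sh` ships with Kafka, and the `localhost:9092` address is an assumption:

```
# Sketch: probe the broker instead of grepping for a ZooKeeper process.
# A successful API-versions call means the broker is accepting connections.
if $DAEMON_PATH/bin/kafka-broker-api-versions.sh --bootstrap-server localhost:9092 > /dev/null 2>&1
then
    echo "Kafka is Running"
else
    echo "Kafka is not Running"
fi
```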
8 changes: 5 additions & 3 deletions .devcontainer/post-create.sh
@@ -1,8 +1,10 @@
cd ~/kafka/
# start zookeeper
bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
KAFKA_CLUSTER_ID="$(bin/kafka-storage.sh random-uuid)"

bin/kafka-storage.sh format -t $KAFKA_CLUSTER_ID -c config/kraft/server.properties

# start kafka
bin/kafka-server-start.sh -daemon config/server.properties
bin/kafka-server-start.sh -daemon config/kraft/server.properties
# wait 2 seconds for the server to start and be able to add partitions
sleep 2s
# add topics
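These lines are the actual ZooKeeper-to-KRaft switch for the dev container: generate a cluster ID, format the log directories with it, then start the broker from the KRaft properties file. A sketch of a post-start sanity check, assuming Kafka 3.3 or later, which bundles `kafka-metadata-quorum.sh`:

```
# Sketch: confirm the single-node KRaft quorum is up before creating topics.
bin/kafka-metadata-quorum.sh --bootstrap-server localhost:9092 describe --status
```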
46 changes: 21 additions & 25 deletions docker-compose-ppm-nsv.yml
@@ -4,35 +4,32 @@

version: '3'
services:
zookeeper:
image: wurstmeister/zookeeper
ports:
- "2181:2181"

kafka:
image: wurstmeister/kafka
image: bitnami/kafka:latest
hostname: kafka
ports:
- "9092:9092"
volumes:
- "${DOCKER_SHARED_VOLUME}:/bitnami"
environment:
DOCKER_HOST_IP: ${DOCKER_HOST_IP}
ZK: ${DOCKER_HOST_IP}:2181
KAFKA_ADVERTISED_HOST_NAME: ${DOCKER_HOST_IP}
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ENABLE_KRAFT: "yes"
KAFKA_CFG_PROCESS_ROLES: "broker,controller"
KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
KAFKA_CFG_LISTENERS: "PLAINTEXT://:9094,CONTROLLER://:9093,EXTERNAL://:9092"
KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT"
KAFKA_CFG_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9094,EXTERNAL://${DOCKER_HOST_IP}:9092"
KAFKA_BROKER_ID: "1"
KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@kafka:9093"
ALLOW_PLAINTEXT_LISTENER: "yes"
KAFKA_CFG_NODE_ID: "1"
KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1"
KAFKA_DELETE_TOPIC_ENABLED: "true"
KAFKA_CLEANUP_POLICY: "delete" # delete old logs
KAFKA_LOG_RETENTION_HOURS: 2
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 3000
KAFKA_RETENTION_MS: 7200000 # delete old logs after 2 hours
KAFKA_SEGMENT_MS: 7200000 # roll segment logs every 2 hours.
# This configuration controls the period of time after
# which Kafka will force the log to roll even if the segment
# file isn't full to ensure that retention can delete or compact old data.
depends_on:
- zookeeper
volumes:
- ${DOCKER_SHARED_VOLUME}/var/run/docker.sock:/var/run/docker.sock
KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeSpatTxPojo:1:1,topic.OdeSpatPojo:1:1,topic.OdeSpatJson:1:1,topic.FilteredOdeSpatJson:1:1,topic.OdeSpatRxJson:1:1,topic.OdeSpatRxPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1,topic.OdeTIMCertExpirationTimeJson:1:1,topic.OdeRawEncodedBSMJson:1:1,topic.OdeRawEncodedSPATJson:1:1,topic.OdeRawEncodedTIMJson:1:1,topic.OdeRawEncodedMAPJson:1:1,topic.OdeMapTxPojo:1:1,topic.OdeMapJson:1:1,topic.OdeRawEncodedSSMJson:1:1,topic.OdeSsmPojo:1:1,topic.OdeSsmJson:1:1,topic.OdeRawEncodedSRMJson:1:1,topic.OdeSrmTxPojo:1:1,topic.OdeSrmJson:1:1,topic.OdeRawEncodedPSMJson:1:1,topic.OdePsmTxPojo:1:1,topic.OdePsmJson:1:1"
KAFKA_CFG_DELETE_TOPIC_ENABLE: "true"
KAFKA_CFG_LOG_RETENTION_HOURS: 2
logging:
options:
max-size: "10m"
max-file: "5"

ode:
build: .
@@ -193,7 +190,6 @@ services:
SDW_PASSWORD: ${SDW_PASSWORD}
depends_on:
- kafka
- zookeeper
- ode

sec:
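The bitnami service above replaces the two-container ZooKeeper/Kafka pair with one combined KRaft node: `PLAINTEXT://:9094` serves containers on the compose network, `CONTROLLER://:9093` carries quorum traffic, and `EXTERNAL://:9092` is advertised at `${DOCKER_HOST_IP}` for clients on the host. A smoke-test sketch from the host, assuming the Kafka CLI tools are on the PATH and `DOCKER_HOST_IP` is exported:

```
# Sketch: hit the EXTERNAL listener from outside the compose network.
kafka-topics.sh --bootstrap-server ${DOCKER_HOST_IP}:9092 --list
```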
46 changes: 17 additions & 29 deletions docker-compose.yml
@@ -1,41 +1,30 @@
version: '3'
services:
zookeeper:
image: wurstmeister/zookeeper
ports:
- "2181:2181"
logging:
options:
max-size: "10m"
max-file: "5"

kafka:
image: wurstmeister/kafka
image: bitnami/kafka:latest
hostname: kafka
ports:
- "9092:9092"
volumes:
- "${DOCKER_SHARED_VOLUME}:/bitnami"
environment:
DOCKER_HOST_IP: ${DOCKER_HOST_IP}
ZK: ${DOCKER_HOST_IP}:2181
KAFKA_ADVERTISED_HOST_NAME: ${DOCKER_HOST_IP}
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ENABLE_KRAFT: "yes"
KAFKA_CFG_PROCESS_ROLES: "broker,controller"
KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
KAFKA_CFG_LISTENERS: "PLAINTEXT://:9094,CONTROLLER://:9093,EXTERNAL://:9092"
KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT"
KAFKA_CFG_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9094,EXTERNAL://${DOCKER_HOST_IP}:9092"
KAFKA_BROKER_ID: "1"
KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@kafka:9093"
ALLOW_PLAINTEXT_LISTENER: "yes"
KAFKA_CFG_NODE_ID: "1"
KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeSpatTxPojo:1:1,topic.OdeSpatPojo:1:1,topic.OdeSpatJson:1:1,topic.FilteredOdeSpatJson:1:1,topic.OdeSpatRxJson:1:1,topic.OdeSpatRxPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1,topic.OdeTIMCertExpirationTimeJson:1:1,topic.OdeRawEncodedBSMJson:1:1,topic.OdeRawEncodedSPATJson:1:1,topic.OdeRawEncodedTIMJson:1:1,topic.OdeRawEncodedMAPJson:1:1,topic.OdeMapTxPojo:1:1,topic.OdeMapJson:1:1,topic.OdeRawEncodedSSMJson:1:1,topic.OdeSsmPojo:1:1,topic.OdeSsmJson:1:1,topic.OdeRawEncodedSRMJson:1:1,topic.OdeSrmTxPojo:1:1,topic.OdeSrmJson:1:1,topic.OdeRawEncodedPSMJson:1:1,topic.OdePsmTxPojo:1:1,topic.OdePsmJson:1:1"
KAFKA_DELETE_TOPIC_ENABLED: "true"
KAFKA_CLEANUP_POLICY: "delete" # delete old logs
KAFKA_LOG_RETENTION_HOURS: 2
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 3000
KAFKA_RETENTION_MS: 7200000 # delete old logs after 2 hours
KAFKA_SEGMENT_MS: 7200000 # roll segment logs every 2 hours.
# This configuration controls the period of time after
# which Kafka will force the log to roll even if the segment
# file isn't full to ensure that retention can delete or compact old data.
depends_on:
- zookeeper
volumes:
- ${DOCKER_SHARED_VOLUME}/var/run/docker.sock:/var/run/docker.sock
KAFKA_CFG_DELETE_TOPIC_ENABLE: "true"
KAFKA_CFG_LOG_RETENTION_HOURS: 2
logging:
options:
max-size: "10m"
max-file: "5"

ode:
@@ -254,7 +243,6 @@ services:
SDW_API_KEY: ${SDW_API_KEY}
depends_on:
- kafka
- zookeeper
- ode
logging:
options:
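Same migration as the ppm-nsv file. Worth noting for reviewers: the bitnami image translates each `KAFKA_CFG_*` variable into a `server.properties` key (lowercased, underscores become dots), which is why the delete-topic and retention settings gained the `CFG` infix here. A sketch of checking the rendered config; the container name `kafka` is an assumption, since compose may prefix it:

```
# Sketch: confirm the KAFKA_CFG_* variables landed in server.properties,
# e.g. KAFKA_CFG_LOG_RETENTION_HOURS=2 should render as log.retention.hours=2.
docker exec kafka grep -E 'log\.retention\.hours|delete\.topic\.enable|node\.id' \
    /opt/bitnami/kafka/config/server.properties
```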
3 changes: 1 addition & 2 deletions docs/Architecture.md
@@ -371,7 +371,7 @@ Docker is utilized as the primary deployment mechanism to
compartmentalize each of the designed micro-services into separate
containers. Docker is used to package all components in a composite of
containers each running a distinct service. The ODE application runs in
one container and other major frameworks such as ZooKeeper and Kafka run
one container and other major frameworks such as Kafka run
in their own separate containers.

<a name="appendix">
@@ -391,4 +391,3 @@ in their own separate containers.
| SCP | Secure Copy |
| US DOT | United States Department of Transportation |
| WebSocket | WebSocket is designed to be implemented in web browsers and web servers, but it can be used by any client or server application. The WebSocket Protocol is an independent TCP-based protocol. Its only relationship to HTTP is that its handshake is interpreted by HTTP servers as an Upgrade request. |
| ZooKeeper | Apache ZooKeeper is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services. |
3 changes: 1 addition & 2 deletions docs/UserGuide.md
@@ -205,7 +205,6 @@ This document is intended for use by the ODE client applications.
| TIM | Traveler Information Message |
| US DOT | United States Department of Transportation |
| WebSocket | WebSocket is designed to be implemented in web browsers and web servers, but it can be used by any client or server application. The WebSocket Protocol is an independent TCP-based protocol. Its only relationship to HTTP is that its handshake is interpreted by HTTP servers as an Upgrade request. |
| ZooKeeper | Apache ZooKeeper is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services. |

<a name="ode-development-environment">

@@ -579,7 +578,7 @@ ODE uses Logback logging framework to log application and data events.

#### 7.2.3 - Steps to turn on/off logging during application runtime.

1. Start ode, Kafka, and Zookeeper as normal.
1. Start ode and Kafka as normal.

2. In a new terminal window run "jconsole".

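With ZooKeeper out of the stack, step 1 only needs the ODE and Kafka processes. A sketch of step 2 that attaches JConsole by PID instead of picking the process from the UI list; the `jpo-ode-svcs` pattern is an assumption and should be adjusted to the running ODE jar:

```
# Sketch: attach JConsole directly to the ODE JVM by PID.
jconsole $(pgrep -f jpo-ode-svcs)
```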
48 changes: 18 additions & 30 deletions docs/dockerhub.md
@@ -18,7 +18,6 @@ The image expects the following environment variables to be set:
## Direct Dependencies
The ODE will fail to start up if the following containers/services are not already present:
- Kafka or Confluent & related requirements
- Zookeeper (relied on by Kafka when run locally)

## Indirect Dependencies
Some functionality will be unreachable without the participation of the following programs (except by directly pushing to kafka topics):
@@ -35,42 +34,31 @@ For further configuration options, see the [GitHub repository](https://github.co
```
version: '3'
services:
zookeeper:
image: wurstmeister/zookeeper
ports:
- "2181:2181"
logging:
options:
max-size: "10m"
max-file: "5"

kafka:
image: wurstmeister/kafka
image: bitnami/kafka:latest
hostname: kafka
ports:
- "9092:9092"
volumes:
- "${DOCKER_SHARED_VOLUME}:/bitnami"
environment:
DOCKER_HOST_IP: ${DOCKER_HOST_IP}
ZK: ${DOCKER_HOST_IP}:2181
KAFKA_ADVERTISED_HOST_NAME: ${DOCKER_HOST_IP}
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
KAFKA_ENABLE_KRAFT: "yes"
KAFKA_CFG_PROCESS_ROLES: "broker,controller"
KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
KAFKA_CFG_LISTENERS: "PLAINTEXT://:9094,CONTROLLER://:9093,EXTERNAL://:9092"
KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT"
KAFKA_CFG_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9094,EXTERNAL://${DOCKER_HOST_IP}:9092"
KAFKA_BROKER_ID: "1"
KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@kafka:9093"
ALLOW_PLAINTEXT_LISTENER: "yes"
KAFKA_CFG_NODE_ID: "1"
KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeSpatTxPojo:1:1,topic.OdeSpatPojo:1:1,topic.OdeSpatJson:1:1,topic.FilteredOdeSpatJson:1:1,topic.OdeSpatRxJson:1:1,topic.OdeSpatRxPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1,topic.OdeTIMCertExpirationTimeJson:1:1,topic.OdeRawEncodedBSMJson:1:1,topic.OdeRawEncodedSPATJson:1:1,topic.OdeRawEncodedTIMJson:1:1,topic.OdeRawEncodedMAPJson:1:1,topic.OdeMapTxPojo:1:1,topic.OdeMapJson:1:1,topic.OdeRawEncodedSSMJson:1:1,topic.OdeSsmPojo:1:1,topic.OdeSsmJson:1:1,topic.OdeRawEncodedSRMJson:1:1,topic.OdeSrmTxPojo:1:1,topic.OdeSrmJson:1:1"
KAFKA_DELETE_TOPIC_ENABLED: "true"
KAFKA_CLEANUP_POLICY: "delete" # delete old logs
KAFKA_LOG_RETENTION_HOURS: 2
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 3000
KAFKA_RETENTION_MS: 7200000 # delete old logs after 2 hours
KAFKA_SEGMENT_MS: 7200000 # roll segment logs every 2 hours.
# This configuration controls the period of time after
# which Kafka will force the log to roll even if the segment
# file isn't full to ensure that retention can delete or compact old data.
depends_on:
- zookeeper
volumes:
- ${DOCKER_SHARED_VOLUME_WINDOWS}/var/run/docker.sock:/var/run/docker.sock
KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeSpatTxPojo:1:1,topic.OdeSpatPojo:1:1,topic.OdeSpatJson:1:1,topic.FilteredOdeSpatJson:1:1,topic.OdeSpatRxJson:1:1,topic.OdeSpatRxPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1,topic.OdeTIMCertExpirationTimeJson:1:1,topic.OdeRawEncodedBSMJson:1:1,topic.OdeRawEncodedSPATJson:1:1,topic.OdeRawEncodedTIMJson:1:1,topic.OdeRawEncodedMAPJson:1:1,topic.OdeMapTxPojo:1:1,topic.OdeMapJson:1:1,topic.OdeRawEncodedSSMJson:1:1,topic.OdeSsmPojo:1:1,topic.OdeSsmJson:1:1,topic.OdeRawEncodedSRMJson:1:1,topic.OdeSrmTxPojo:1:1,topic.OdeSrmJson:1:1,topic.OdeRawEncodedPSMJson:1:1,topic.OdePsmTxPojo:1:1,topic.OdePsmJson:1:1"
KAFKA_CFG_DELETE_TOPIC_ENABLE: "true"
KAFKA_CFG_LOG_RETENTION_HOURS: 2
logging:
options:
max-size: "10m"
max-file: "5"

ode:
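One behavioral difference from the wurstmeister setup: `${DOCKER_SHARED_VOLUME}` is now mounted at `/bitnami` so broker data can persist across container restarts, instead of mounting the Docker socket. A sketch of the `.env` file this snippet expects; both values are illustrative:

```
# Illustrative .env for the compose snippet above; substitute real values.
DOCKER_HOST_IP=192.168.1.151
DOCKER_SHARED_VOLUME=/var/tmp/ode-shared
```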
6 changes: 3 additions & 3 deletions jpo-ode-consumer-example/README.md
@@ -49,7 +49,7 @@ The IP used is the location of the Kafka endpoints.
####Create, alter, list, and describe topics.

```
kafka-topics --zookeeper 192.168.1.151:2181 --list
kafka-topics --bootstrap-server=192.168.1.151:9092 --list
sink1
t1
t2
@@ -58,11 +58,11 @@ t2
####Read data from a Kafka topic and write it to standard output.

```
kafka-console-consumer --zookeeper 192.168.1.151:2181 --topic topic.J2735Bsm
kafka-console-consumer --bootstrap-server=192.168.1.151:9092 --topic topic.J2735Bsm
```

####Read data from standard input and write it to a Kafka topic.

```
kafka-console-producer --broker-list 192.168.1.151:9092 --topic topic.J2735Bsm
kafka-console-producer --bootstrap-server=192.168.1.151:9092 --topic topic.J2735Bsm
```
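The same `--bootstrap-server` form also covers topic inspection; a sketch, with the topic name reused from above and output omitted:

```
kafka-topics --bootstrap-server=192.168.1.151:9092 --describe --topic topic.J2735Bsm
```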