Merge branch 'dev' into mongo-archiving
Michael7371 committed Mar 27, 2024
2 parents 1aba554 + 8d7f869 commit 6d02416
Showing 17 changed files with 114 additions and 136 deletions.
2 changes: 1 addition & 1 deletion .devcontainer/Dockerfile
@@ -45,7 +45,7 @@ RUN apt-get -y install snmp
 
 #-------------------Install Kafka----------------------------------
 RUN mkdir ~/Downloads
-RUN curl "https://archive.apache.org/dist/kafka/3.6.0/kafka_2.12-3.6.0.tgz" -o ~/Downloads/kafka.tgz
+RUN curl "https://archive.apache.org/dist/kafka/3.6.1/kafka_2.12-3.6.1.tgz" -o ~/Downloads/kafka.tgz
 RUN mkdir ~/kafka \
   && cd ~/kafka \
   && tar -xvzf ~/Downloads/kafka.tgz --strip 1
12 changes: 0 additions & 12 deletions .devcontainer/kafka
@@ -8,32 +8,20 @@ export LOG_DIR=/var/log/kafka
 case "$1" in
   start)
     # Start daemon.
-    echo "Starting Zookeeper";
-    $DAEMON_PATH/bin/zookeeper-server-start.sh -daemon $DAEMON_PATH/config/zookeeper.properties
     echo "Starting Kafka";
     $DAEMON_PATH/bin/kafka-server-start.sh -daemon $DAEMON_PATH/config/server.properties
     ;;
   stop)
     # Stop daemons.
     echo "Shutting down Kafka";
     $DAEMON_PATH/bin/kafka-server-stop.sh
-    sleep 2
-    echo "Shutting down Zookeeper";
-    $DAEMON_PATH/bin/zookeeper-server-stop.sh
     ;;
   restart)
     $0 stop
     sleep 2
     $0 start
     ;;
   status)
-    pid=`ps ax | grep -i 'org.apache.zookeeper.server' | grep -v grep | awk '{print $1}'`
-    if [ -n "$pid" ]
-    then
-      echo "Zookeeper is Running as PID: $pid"
-    else
-      echo "Zookeeper is not Running"
-    fi
     pid=`ps ax | grep -i 'kafka.Kafka' | grep -v grep | awk '{print $1}'`
     if [ -n "$pid" ]
     then
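With ZooKeeper removed, the service script now manages only the Kafka broker. A usage sketch follows; the install path is an assumption, so adjust it to wherever the devcontainer places the script:

```
# Hypothetical install location for the service script above
sudo /etc/init.d/kafka start    # starts only the Kafka broker
sudo /etc/init.d/kafka status   # reports just the kafka.Kafka process
sudo /etc/init.d/kafka stop     # no ZooKeeper shutdown step remains
```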
8 changes: 5 additions & 3 deletions .devcontainer/post-create.sh
@@ -1,8 +1,10 @@
 cd ~/kafka/
-# start zookeeper
-bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
+KAFKA_CLUSTER_ID="$(bin/kafka-storage.sh random-uuid)"
+
+bin/kafka-storage.sh format -t $KAFKA_CLUSTER_ID -c config/kraft/server.properties
+
 # start kafka
-bin/kafka-server-start.sh -daemon config/server.properties
+bin/kafka-server-start.sh -daemon config/kraft/server.properties
 # wait 2 seconds for the server to start and be able to add partitions
 sleep 2s
 # add topics
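The new `kafka-storage.sh format` step matters because a KRaft broker will not start until its log directory has been formatted with a cluster ID. A quick post-start check, as a sketch that assumes the kafka_2.12-3.6.1 layout installed by the Dockerfile above:

```
cd ~/kafka
# Probe the broker; this prints its supported API versions once it is reachable
bin/kafka-broker-api-versions.sh --bootstrap-server localhost:9092
# List the topics created later in post-create.sh
bin/kafka-topics.sh --bootstrap-server localhost:9092 --list
```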
1 change: 1 addition & 0 deletions .gitignore
@@ -21,6 +21,7 @@ settings.xml
 /.settings
 .metadata
 *.pyc
+/kafka/
 
 ####################
 ### jpo-ode-svcs ###
1 change: 1 addition & 0 deletions README.md
@@ -154,6 +154,7 @@ The following guide contains information about the data flow diagrams for the ODE
 - Additionally `git` commands may fail for similar reasons, you can fix this by running `export GIT_SSL_NO_VERIFY=1`.
 - Windows users may find more information on installing and using Docker [here](https://github.com/usdot-jpo-ode/jpo-ode/wiki/Docker-management).
 - Users interested in Kafka may find more guidance and configuration options [here](docker/kafka/README.md).
+- A compatibility guide containing recommendations for suitable versions of submodules for each main module version can be found [here](docs/compatibility.md).
 
 **Configuration:**
46 changes: 21 additions & 25 deletions docker-compose-ppm-nsv.yml
@@ -4,35 +4,32 @@
 
 version: '3'
 services:
-  zookeeper:
-    image: wurstmeister/zookeeper
-    ports:
-      - "2181:2181"
 
   kafka:
-    image: wurstmeister/kafka
+    image: bitnami/kafka:latest
+    hostname: kafka
     ports:
       - "9092:9092"
+    volumes:
+      - "${DOCKER_SHARED_VOLUME}:/bitnami"
     environment:
-      DOCKER_HOST_IP: ${DOCKER_HOST_IP}
-      ZK: ${DOCKER_HOST_IP}:2181
-      KAFKA_ADVERTISED_HOST_NAME: ${DOCKER_HOST_IP}
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+      KAFKA_ENABLE_KRAFT: "yes"
+      KAFKA_CFG_PROCESS_ROLES: "broker,controller"
+      KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
+      KAFKA_CFG_LISTENERS: "PLAINTEXT://:9094,CONTROLLER://:9093,EXTERNAL://:9092"
+      KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT"
+      KAFKA_CFG_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9094,EXTERNAL://${DOCKER_HOST_IP}:9092"
-      KAFKA_BROKER_ID: "1"
+      KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@kafka:9093"
+      ALLOW_PLAINTEXT_LISTENER: "yes"
+      KAFKA_CFG_NODE_ID: "1"
       KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
-      KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1"
-      KAFKA_DELETE_TOPIC_ENABLED: "true"
-      KAFKA_CLEANUP_POLICY: "delete" # delete old logs
-      KAFKA_LOG_RETENTION_HOURS: 2
-      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 3000
-      KAFKA_RETENTION_MS: 7200000 # delete old logs after 2 hours
-      KAFKA_SEGMENT_MS: 7200000 # roll segment logs every 2 hours.
-      # This configuration controls the period of time after
-      # which Kafka will force the log to roll even if the segment
-      # file isn't full to ensure that retention can delete or compact old data.
-    depends_on:
-      - zookeeper
-    volumes:
-      - ${DOCKER_SHARED_VOLUME}/var/run/docker.sock:/var/run/docker.sock
+      KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeSpatTxPojo:1:1,topic.OdeSpatPojo:1:1,topic.OdeSpatJson:1:1,topic.FilteredOdeSpatJson:1:1,topic.OdeSpatRxJson:1:1,topic.OdeSpatRxPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1,topic.OdeTIMCertExpirationTimeJson:1:1,topic.OdeRawEncodedBSMJson:1:1,topic.OdeRawEncodedSPATJson:1:1,topic.OdeRawEncodedTIMJson:1:1,topic.OdeRawEncodedMAPJson:1:1,topic.OdeMapTxPojo:1:1,topic.OdeMapJson:1:1,topic.OdeRawEncodedSSMJson:1:1,topic.OdeSsmPojo:1:1,topic.OdeSsmJson:1:1,topic.OdeRawEncodedSRMJson:1:1,topic.OdeSrmTxPojo:1:1,topic.OdeSrmJson:1:1,topic.OdeRawEncodedPSMJson:1:1,topic.OdePsmTxPojo:1:1,topic.OdePsmJson:1:1"
+      KAFKA_CFG_DELETE_TOPIC_ENABLE: "true"
+      KAFKA_CFG_LOG_RETENTION_HOURS: 2
+    logging:
+      options:
+        max-size: "10m"
+        max-file: "5"
 
   ode:
     build: .
@@ -193,7 +190,6 @@ services:
       SDW_PASSWORD: ${SDW_PASSWORD}
     depends_on:
       - kafka
-      - zookeeper
       - ode
 
   sec:
46 changes: 17 additions & 29 deletions docker-compose.yml
@@ -1,41 +1,30 @@
 version: '3'
 services:
-  zookeeper:
-    image: wurstmeister/zookeeper
-    ports:
-      - "2181:2181"
-    logging:
-      options:
-        max-size: "10m"
-        max-file: "5"
 
   kafka:
-    image: wurstmeister/kafka
+    image: bitnami/kafka:latest
+    hostname: kafka
     ports:
       - "9092:9092"
+    volumes:
+      - "${DOCKER_SHARED_VOLUME}:/bitnami"
     environment:
-      DOCKER_HOST_IP: ${DOCKER_HOST_IP}
-      ZK: ${DOCKER_HOST_IP}:2181
-      KAFKA_ADVERTISED_HOST_NAME: ${DOCKER_HOST_IP}
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+      KAFKA_ENABLE_KRAFT: "yes"
+      KAFKA_CFG_PROCESS_ROLES: "broker,controller"
+      KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
+      KAFKA_CFG_LISTENERS: "PLAINTEXT://:9094,CONTROLLER://:9093,EXTERNAL://:9092"
+      KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT"
+      KAFKA_CFG_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9094,EXTERNAL://${DOCKER_HOST_IP}:9092"
-      KAFKA_BROKER_ID: "1"
+      KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@kafka:9093"
+      ALLOW_PLAINTEXT_LISTENER: "yes"
+      KAFKA_CFG_NODE_ID: "1"
       KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
       KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeSpatTxPojo:1:1,topic.OdeSpatPojo:1:1,topic.OdeSpatJson:1:1,topic.FilteredOdeSpatJson:1:1,topic.OdeSpatRxJson:1:1,topic.OdeSpatRxPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1,topic.OdeTIMCertExpirationTimeJson:1:1,topic.OdeRawEncodedBSMJson:1:1,topic.OdeRawEncodedSPATJson:1:1,topic.OdeRawEncodedTIMJson:1:1,topic.OdeRawEncodedMAPJson:1:1,topic.OdeMapTxPojo:1:1,topic.OdeMapJson:1:1,topic.OdeRawEncodedSSMJson:1:1,topic.OdeSsmPojo:1:1,topic.OdeSsmJson:1:1,topic.OdeRawEncodedSRMJson:1:1,topic.OdeSrmTxPojo:1:1,topic.OdeSrmJson:1:1,topic.OdeRawEncodedPSMJson:1:1,topic.OdePsmTxPojo:1:1,topic.OdePsmJson:1:1"
-      KAFKA_DELETE_TOPIC_ENABLED: "true"
-      KAFKA_CLEANUP_POLICY: "delete" # delete old logs
-      KAFKA_LOG_RETENTION_HOURS: 2
-      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 3000
-      KAFKA_RETENTION_MS: 7200000 # delete old logs after 2 hours
-      KAFKA_SEGMENT_MS: 7200000 # roll segment logs every 2 hours.
-      # This configuration controls the period of time after
-      # which Kafka will force the log to roll even if the segment
-      # file isn't full to ensure that retention can delete or compact old data.
-    depends_on:
-      - zookeeper
-    volumes:
-      - ${DOCKER_SHARED_VOLUME}/var/run/docker.sock:/var/run/docker.sock
+      KAFKA_CFG_DELETE_TOPIC_ENABLE: "true"
+      KAFKA_CFG_LOG_RETENTION_HOURS: 2
     logging:
       options:
-        max-size: "10m"
+        max-size: "10m"
         max-file: "5"
 
   ode:
@@ -254,7 +243,6 @@ services:
       SDW_API_KEY: ${SDW_API_KEY}
     depends_on:
       - kafka
-      - zookeeper
       - ode
     logging:
       options:
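Under the new listener layout, containers on the compose network reach the broker through the advertised `PLAINTEXT` listener at `kafka:9094`, while processes on the host use the `EXTERNAL` listener at `${DOCKER_HOST_IP}:9092`. A smoke-test sketch, assuming the Bitnami image keeps the Kafka CLI on its default PATH:

```
# From inside the broker container: exercise the internal listener
docker compose up -d kafka
docker compose exec kafka kafka-topics.sh --bootstrap-server kafka:9094 --list

# From the host (requires a local Kafka CLI install): exercise the EXTERNAL listener
kafka-topics.sh --bootstrap-server ${DOCKER_HOST_IP}:9092 --list
```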
3 changes: 1 addition & 2 deletions docs/Architecture.md
@@ -371,7 +371,7 @@ Docker is utilized as the primary deployment mechanism to
 compartmentalize each of the designed micro-services into separate
 containers. Docker is used to package all components in a composite of
 containers each running a distinct service. The ODE application runs in
-one container and other major frameworks such as ZooKeeper and Kafka run
+one container and other major frameworks such as Kafka run
 in their own separate containers.

<a name="appendix">
@@ -391,4 +391,3 @@ in their own separate containers.
 | SCP | Secure Copy |
 | US DOT | United States Department of Transportation |
 | WebSocket | WebSocket is designed to be implemented in web browsers and web servers, but it can be used by any client or server application. The WebSocket Protocol is an independent TCP-based protocol. Its only relationship to HTTP is that its handshake is interpreted by HTTP servers as an Upgrade request. |
-| ZooKeeper | Apache ZooKeeper is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services. |
3 changes: 1 addition & 2 deletions docs/UserGuide.md
@@ -205,7 +205,6 @@ This document is intended for use by the ODE client applications.
 | TIM | Traveler Information Message |
 | US DOT | United States Department of Transportation |
 | WebSocket | WebSocket is designed to be implemented in web browsers and web servers, but it can be used by any client or server application. The WebSocket Protocol is an independent TCP-based protocol. Its only relationship to HTTP is that its handshake is interpreted by HTTP servers as an Upgrade request. |
-| ZooKeeper | Apache ZooKeeper is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services. |

<a name="ode-development-environment">

@@ -579,7 +578,7 @@ ODE uses Logback logging framework to log application and data events.
 
 #### 7.2.3 - Steps to turn on/off logging during application runtime.
 
-1. Start ode, Kafka, and Zookeeper as normal.
+1. Start ode and Kafka as normal.
 
 2. In a new terminal window run "jconsole".
 
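For step 2, a sketch of attaching JConsole to the running ODE JVM; the MBean domain is an assumption based on Logback's default JMXConfigurator registration:

```
jps -l           # find the PID of the ode application JVM
jconsole <pid>   # connect, open the MBeans tab, and look under
                 # ch.qos.logback.classic to change logger levels at runtime
```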
12 changes: 12 additions & 0 deletions docs/compatibility.md
@@ -0,0 +1,12 @@
+# Submodule Compatibility Guide
+This table suggests which versions of the individual submodules are best suited to accompany each version of the main module, helping users ensure compatibility and smooth integration by recommending specific submodule versions for their chosen main module version.
+
+| [ODE (this project)](https://github.com/usdot-jpo-ode/jpo-ode/releases) | [ACM](https://github.com/usdot-jpo-ode/asn1_codec/releases) | [PPM](https://github.com/usdot-jpo-ode/jpo-cvdp/releases) | [SEC](https://github.com/usdot-jpo-ode/jpo-security-svcs/releases) | [SDWD](https://github.com/usdot-jpo-ode/jpo-sdw-depositor/releases) | [S3D](https://github.com/usdot-jpo-ode/jpo-s3-deposit/releases) | [GJConverter](https://github.com/usdot-jpo-ode/jpo-geojsonconverter/releases) | [CMonitor](https://github.com/usdot-jpo-ode/jpo-conflictmonitor/releases) | [CVisualizer](https://github.com/usdot-jpo-ode/jpo-conflictvisualizer/releases) | [CVManager](https://github.com/usdot-jpo-ode/jpo-cvmanager/releases) |
+| ----------------- | --- | --- | --- | ---- | --- | ----------- | -------- | ----------- | ----------- |
+| 2.0.1 | 2.0.0 | 1.3.0 | 1.4.0 | 1.6.0 | 1.4.0 | 1.2.0 | 1.2.0 | 1.2.0 | 1.2.0 |
+| 1.5.1 | 1.5.0 | 1.2.0 | 1.3.0 | 1.5.0 | 1.3.0 | 1.1.0 | 1.1.0 | 1.1.0 | 1.1.0 |
+| 1.4.1 | 1.4.1 | 1.1.1 | 1.2.1 | 1.4.1 | 1.2.1 | 1.0.0 | 1.0.1 | 1.0.1 | 1.0.1 |
+| 1.4.0 | 1.4.0 | 1.1.0 | 1.2.0 | 1.4.0 | 1.2.0 | N/A | N/A | N/A | N/A |
+| 1.3.0 | 1.3.0 | 1.0.0 | 1.0.1 | 1.3.0 | 1.1.0 | N/A | N/A | N/A | N/A |
+
+For example, if you're using ODE version 2.0.1, it's recommended to use ACM 2.0.0, PPM 1.3.0, SEC 1.4.0, SDWD 1.6.0, S3D 1.4.0, GJConverter 1.2.0, CMonitor 1.2.0, CVisualizer 1.2.0, and CVManager 1.2.0. While other combinations may work, these versions are suggested for the best compatibility.
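Because the ODE pulls several of these components in as git submodules, one way to apply a row of this table is to check each submodule out at its recommended release. A sketch for the ODE 2.0.1 row; the tag name is an assumption, so verify it against the ACM releases page:

```
git submodule update --init --recursive
cd asn1_codec                 # the ACM submodule
git fetch --tags
git checkout 2.0.0            # hypothetical tag name for the ACM 2.0.0 release
cd ..
```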
48 changes: 18 additions & 30 deletions docs/dockerhub.md
@@ -18,7 +18,6 @@ The image expects the following environment variables to be set:
 
 ## Direct Dependencies
 The ODE will fail to start up if the following containers/services are not already present:
 - Kafka or Confluent & related requirements
-- Zookeeper (relied on by Kafka when run locally)
 
 ## Indirect Dependencies
 Some functionality will be unreachable without the participation of the following programs (except by directly pushing to kafka topics):
@@ -35,42 +34,31 @@ For further configuration options, see the [GitHub repository](https://github.co
 ```
 version: '3'
 services:
-  zookeeper:
-    image: wurstmeister/zookeeper
-    ports:
-      - "2181:2181"
-    logging:
-      options:
-        max-size: "10m"
-        max-file: "5"
   kafka:
-    image: wurstmeister/kafka
+    image: bitnami/kafka:latest
+    hostname: kafka
     ports:
       - "9092:9092"
+    volumes:
+      - "${DOCKER_SHARED_VOLUME}:/bitnami"
     environment:
-      DOCKER_HOST_IP: ${DOCKER_HOST_IP}
-      ZK: ${DOCKER_HOST_IP}:2181
-      KAFKA_ADVERTISED_HOST_NAME: ${DOCKER_HOST_IP}
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+      KAFKA_ENABLE_KRAFT: "yes"
+      KAFKA_CFG_PROCESS_ROLES: "broker,controller"
+      KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
+      KAFKA_CFG_LISTENERS: "PLAINTEXT://:9094,CONTROLLER://:9093,EXTERNAL://:9092"
+      KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT"
+      KAFKA_CFG_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9094,EXTERNAL://${DOCKER_HOST_IP}:9092"
-      KAFKA_BROKER_ID: "1"
+      KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@kafka:9093"
+      ALLOW_PLAINTEXT_LISTENER: "yes"
+      KAFKA_CFG_NODE_ID: "1"
       KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
-      KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeSpatTxPojo:1:1,topic.OdeSpatPojo:1:1,topic.OdeSpatJson:1:1,topic.FilteredOdeSpatJson:1:1,topic.OdeSpatRxJson:1:1,topic.OdeSpatRxPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1,topic.OdeTIMCertExpirationTimeJson:1:1,topic.OdeRawEncodedBSMJson:1:1,topic.OdeRawEncodedSPATJson:1:1,topic.OdeRawEncodedTIMJson:1:1,topic.OdeRawEncodedMAPJson:1:1,topic.OdeMapTxPojo:1:1,topic.OdeMapJson:1:1,topic.OdeRawEncodedSSMJson:1:1,topic.OdeSsmPojo:1:1,topic.OdeSsmJson:1:1,topic.OdeRawEncodedSRMJson:1:1,topic.OdeSrmTxPojo:1:1,topic.OdeSrmJson:1:1"
-      KAFKA_DELETE_TOPIC_ENABLED: "true"
-      KAFKA_CLEANUP_POLICY: "delete" # delete old logs
-      KAFKA_LOG_RETENTION_HOURS: 2
-      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 3000
-      KAFKA_RETENTION_MS: 7200000 # delete old logs after 2 hours
-      KAFKA_SEGMENT_MS: 7200000 # roll segment logs every 2 hours.
-      # This configuration controls the period of time after
-      # which Kafka will force the log to roll even if the segment
-      # file isn't full to ensure that retention can delete or compact old data.
-    depends_on:
-      - zookeeper
-    volumes:
-      - ${DOCKER_SHARED_VOLUME_WINDOWS}/var/run/docker.sock:/var/run/docker.sock
+      KAFKA_CREATE_TOPICS: "topic.OdeBsmPojo:1:1,topic.OdeSpatTxPojo:1:1,topic.OdeSpatPojo:1:1,topic.OdeSpatJson:1:1,topic.FilteredOdeSpatJson:1:1,topic.OdeSpatRxJson:1:1,topic.OdeSpatRxPojo:1:1,topic.OdeBsmJson:1:1,topic.FilteredOdeBsmJson:1:1,topic.OdeTimJson:1:1,topic.OdeTimBroadcastJson:1:1,topic.J2735TimBroadcastJson:1:1,topic.OdeDriverAlertJson:1:1,topic.Asn1DecoderInput:1:1,topic.Asn1DecoderOutput:1:1,topic.Asn1EncoderInput:1:1,topic.Asn1EncoderOutput:1:1,topic.SDWDepositorInput:1:1,topic.OdeTIMCertExpirationTimeJson:1:1,topic.OdeRawEncodedBSMJson:1:1,topic.OdeRawEncodedSPATJson:1:1,topic.OdeRawEncodedTIMJson:1:1,topic.OdeRawEncodedMAPJson:1:1,topic.OdeMapTxPojo:1:1,topic.OdeMapJson:1:1,topic.OdeRawEncodedSSMJson:1:1,topic.OdeSsmPojo:1:1,topic.OdeSsmJson:1:1,topic.OdeRawEncodedSRMJson:1:1,topic.OdeSrmTxPojo:1:1,topic.OdeSrmJson:1:1,topic.OdeRawEncodedPSMJson:1:1,topic.OdePsmTxPojo:1:1,topic.OdePsmJson:1:1"
+      KAFKA_CFG_DELETE_TOPIC_ENABLE: "true"
+      KAFKA_CFG_LOG_RETENTION_HOURS: 2
     logging:
       options:
-        max-size: "10m"
+        max-size: "10m"
         max-file: "5"
   ode:
6 changes: 3 additions & 3 deletions jpo-ode-consumer-example/README.md
@@ -49,7 +49,7 @@ The IP used is the location of the Kafka endpoints.
 
 #### Create, alter, list, and describe topics.
 
 ```
-kafka-topics --zookeeper 192.168.1.151:2181 --list
+kafka-topics --bootstrap-server=192.168.1.151:9092 --list
 sink1
 t1
 t2
@@ -58,11 +58,11 @@ t2
 ```
 
 #### Read data from a Kafka topic and write it to standard output.
 
 ```
-kafka-console-consumer --zookeeper 192.168.1.151:2181 --topic topic.J2735Bsm
+kafka-console-consumer --bootstrap-server=192.168.1.151:9092 --topic topic.J2735Bsm
 ```
 
 #### Read data from standard output and write it to a Kafka topic.
 
 ```
-kafka-console-producer --broker-list 192.168.1.151:9092 --topic topic.J2735Bsm
+kafka-console-producer --bootstrap-server=192.168.1.151:9092 --topic topic.J2735Bsm
 ```
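Chaining the last two commands gives a quick end-to-end check of a topic. A sketch that reuses the broker address from the examples above; `--from-beginning` and `--max-messages` are standard console-consumer flags:

```
# Produce one test record, then read it back
echo '{"probe":"hello"}' | kafka-console-producer --bootstrap-server=192.168.1.151:9092 --topic topic.J2735Bsm
kafka-console-consumer --bootstrap-server=192.168.1.151:9092 --topic topic.J2735Bsm \
  --from-beginning --max-messages 1
```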